diff --git a/cddl/lib/libuutil/Makefile b/cddl/lib/libuutil/Makefile
index 169b8d8afe01..fdbac6de2748 100644
--- a/cddl/lib/libuutil/Makefile
+++ b/cddl/lib/libuutil/Makefile
@@ -1,27 +1,26 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/contrib/openzfs/lib/libuutil
PACKAGE= runtime
LIB= uutil
SRCS=\
uu_alloc.c \
uu_avl.c \
uu_ident.c \
uu_list.c \
uu_misc.c \
- uu_pname.c \
uu_string.c
WARNS?= 2
CFLAGS+= -DIN_BASE
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/include
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libspl/include/
CFLAGS+= -I${SRCTOP}/sys/contrib/openzfs/lib/libspl/include/os/freebsd
CFLAGS+= -I${SRCTOP}/sys
CFLAGS+= -I${SRCTOP}/cddl/compat/opensolaris/include
CFLAGS+= -include ${SRCTOP}/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h
LIBADD= avl spl
.include <bsd.lib.mk>
diff --git a/cddl/lib/libzpool/Makefile b/cddl/lib/libzpool/Makefile
index 92f1bda6d82a..b9ef6575c99a 100644
--- a/cddl/lib/libzpool/Makefile
+++ b/cddl/lib/libzpool/Makefile
@@ -1,333 +1,332 @@
# $FreeBSD$
ZFSTOP= ${SRCTOP}/sys/contrib/openzfs
# ZFS_COMMON_SRCS
.PATH: ${ZFSTOP}/module/zfs
.PATH: ${ZFSTOP}/module/zcommon
.PATH: ${ZFSTOP}/module/unicode
# LUA_SRCS
.PATH: ${ZFSTOP}/module/lua
# ZSTD_SRCS
.PATH: ${ZFSTOP}/module/zstd
.PATH: ${ZFSTOP}/module/zstd/lib/common
.PATH: ${ZFSTOP}/module/zstd/lib/compress
.PATH: ${ZFSTOP}/module/zstd/lib/decompress
.PATH: ${ZFSTOP}/module/os/linux/zfs
.PATH: ${ZFSTOP}/lib/libzpool
.if exists(${SRCTOP}/sys/cddl/contrib/opensolaris/common/atomic/${MACHINE_ARCH}/opensolaris_atomic.S)
.PATH: ${SRCTOP}/sys/cddl/contrib/opensolaris/common/atomic/${MACHINE_ARCH}
ATOMIC_SRCS= opensolaris_atomic.S
ACFLAGS+= -Wa,--noexecstack
.else
.PATH: ${SRCTOP}/sys/cddl/compat/opensolaris/kern
ATOMIC_SRCS= opensolaris_atomic.c
.endif
.if ${MACHINE_ARCH} == "powerpc" || ${MACHINE_ARCH} == "powerpcspe"
# Don't waste GOT entries on small data.
PICFLAG= -fPIC
.endif
LIB= zpool
USER_C = \
kernel.c \
taskq.c \
util.c
KERNEL_C = \
zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zfs_zstd.c \
zpool_prop.c \
zprop_common.c \
abd.c \
abd_os.c \
aggsum.c \
arc.c \
arc_os.c \
blake3_zfs.c \
blkptr.c \
bplist.c \
bpobj.c \
bptree.c \
btree.c \
bqueue.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_bookmark.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_dir.c \
dsl_crypt.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_destroy.c \
dsl_userhold.c \
edonr_zfs.c \
entropy_common.c \
error_private.c \
fm.c \
fse_compress.c \
fse_decompress.c \
gzip.c \
hist.c \
hkdf.c \
huf_compress.c \
huf_decompress.c \
lzjb.c \
lz4.c \
lz4_zfs.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
pool.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha256.c \
skein_zfs.c \
spa.c \
- spa_boot.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
trace.c \
uberblock.c \
unique.c \
vdev.c \
vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_file.c \
vdev_indirect_births.c \
vdev_indirect.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math_aarch64_neon.c \
vdev_raidz_math_aarch64_neonx2.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
xxhash.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_chksum.c \
zfs_debug.c \
zfs_fm.c \
zfs_fuid.c \
zfs_sa.c \
zfs_znode.c \
zfs_racct.c \
zfs_ratelimit.c \
zfs_rlock.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_crypt.c \
zio_inject.c \
zle.c \
zrlock.c \
zstd_common.c \
zstd_compress.c \
zstd_compress_literals.c \
zstd_compress_sequences.c \
zstd_compress_superblock.c \
zstd_ddict.c \
zstd_decompress.c \
zstd_decompress_block.c \
zstd_double_fast.c \
zstd_fast.c \
zstd_lazy.c \
zstd_ldm.c \
zstd_opt.c \
zthr.c
ARCH_C =
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
ARCH_C += vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c
CFLAGS += -DHAVE_SSE2 -DHAVE_SSE3
.endif
.if ${MACHINE_ARCH} == "amd64"
ARCH_C += zfs_fletcher_avx512.c
CFLAGS+= -DHAVE_AVX2 -DHAVE_AVX -D__x86_64 -DHAVE_AVX512F \
-DHAVE_AVX512BW
.endif
.if ${MACHINE_CPUARCH} == "aarch64"
ARCH_C += zfs_fletcher_aarch64_neon.c
.endif
LUA_C = \
lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
UNICODE_C = u8_textprep.c uconv.c
SRCS= ${USER_C} ${KERNEL_C} ${LUA_C} ${UNICODE_C} ${ARCH_C}
WARNS?= 2
CFLAGS+= \
-DIN_BASE \
-I${ZFSTOP}/include \
-I${ZFSTOP}/lib/libspl/include \
-I${ZFSTOP}/lib/libspl/include/os/freebsd \
-I${SRCTOP}/sys \
-I${ZFSTOP}/include/os/freebsd/zfs \
-I${SRCTOP}/cddl/compat/opensolaris/include \
-I${ZFSTOP}/module/icp/include \
-include ${ZFSTOP}/include/os/freebsd/spl/sys/ccompile.h \
-DHAVE_ISSETUGID \
-include ${SRCTOP}/sys/modules/zfs/zfs_config.h \
-I${SRCTOP}/sys/modules/zfs \
-I${ZFSTOP}/include/os/freebsd/zfs \
-DLIB_ZPOOL_BUILD -DZFS_DEBUG \
# XXX: pthread doesn't have mutex_owned() equivalent, so we need to look
# into libthr private structures. That's sooo evil, but it's only for
# ZFS debugging tools needs.
CFLAGS+= -DWANTS_MUTEX_OWNED
CFLAGS+= -I${SRCTOP}/lib/libpthread/thread
CFLAGS+= -I${SRCTOP}/lib/libpthread/sys
CFLAGS+= -I${SRCTOP}/lib/libthr/arch/${MACHINE_CPUARCH}/include
CFLAGS.gcc+= -fms-extensions
LIBADD= md pthread z spl icp nvpair avl umem
# atomic.S doesn't like profiling.
MK_PROFILE= no
CSTD= c99
# Since there are many asserts in this library, it makes no sense to compile
# it without debugging.
CFLAGS+= -g -DDEBUG=1
CFLAGS.entropy_common.c= -fno-tree-vectorize
CFLAGS.entropy_common.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.error_private.c= -fno-tree-vectorize
CFLAGS.error_private.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_compress.c= -fno-tree-vectorize
CFLAGS.fse_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_decompress.c= -fno-tree-vectorize
CFLAGS.fse_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.hist.c= -fno-tree-vectorize
CFLAGS.hist.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_compress.c= -fno-tree-vectorize
CFLAGS.huf_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_decompress.c= -fno-tree-vectorize
CFLAGS.huf_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.pool.c= -fno-tree-vectorize
CFLAGS.pool.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.xxhash.c= -fno-tree-vectorize
CFLAGS.xxhash.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress.c= -fno-tree-vectorize
CFLAGS.zstd_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_literals.c= -fno-tree-vectorize
CFLAGS.zstd_compress_literals.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_sequences.c= -fno-tree-vectorize
CFLAGS.zstd_compress_sequences.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_superblock.c= -fno-tree-vectorize
CFLAGS.zstd_compress_superblock.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_double_fast.c= -fno-tree-vectorize
CFLAGS.zstd_double_fast.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_fast.c= -fno-tree-vectorize
CFLAGS.zstd_fast.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_lazy.c= -fno-tree-vectorize
CFLAGS.zstd_lazy.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ldm.c= -fno-tree-vectorize
CFLAGS.zstd_ldm.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_opt.c= -fno-tree-vectorize
CFLAGS.zstd_opt.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ddict.c= -fno-tree-vectorize
CFLAGS.zstd_ddict.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress.c= -fno-tree-vectorize
CFLAGS.zstd_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress_block.c= -fno-tree-vectorize
CFLAGS.zstd_decompress_block.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
.include <bsd.lib.mk>
diff --git a/stand/libsa/zfs/blake3_impl_hack.c b/stand/libsa/zfs/blake3_impl_hack.c
index 709ce510dad6..bc958b961a08 100644
--- a/stand/libsa/zfs/blake3_impl_hack.c
+++ b/stand/libsa/zfs/blake3_impl_hack.c
@@ -1,28 +1,35 @@
/*
* Copyright 2022, Netflix, Inc
*
* SPDX-License-Identifier: BSD-2-Clause
*/
/*
 * Hack for aarch64... There's no way to tell it to omit the SIMD
 * versions, so we fake it here.
*/
+#ifndef isspace
+static __inline int isspace(int c)
+{
+ return c == ' ' || (c >= 0x9 && c <= 0xd);
+}
+#endif
+
#include "blake3_impl.c"
static inline boolean_t blake3_is_not_supported(void)
{
return (B_FALSE);
}
-const blake3_impl_ops_t blake3_sse2_impl = {
+const blake3_ops_t blake3_sse2_impl = {
.is_supported = blake3_is_not_supported,
.degree = 4,
.name = "fakesse2"
};
-const blake3_impl_ops_t blake3_sse41_impl = {
+const blake3_ops_t blake3_sse41_impl = {
.is_supported = blake3_is_not_supported,
.degree = 4,
.name = "fakesse41"
};
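
For context, the stub tables above only work because implementation selection in blake3_impl.c skips any entry whose is_supported() callback returns false, so an always-unsupported fake entry satisfies the symbol references on aarch64 without pulling in SIMD code. Below is a minimal, self-contained sketch of that selection pattern; select_blake3_ops(), blake3_generic_impl, and the always_*() helpers are illustrative names only, not the actual OpenZFS symbols.

/*
 * Sketch of the is_supported() selection pattern the fake SSE tables rely
 * on: entries reporting "not supported" exist only to satisfy the linker
 * and are never chosen.  Illustrative code, not the OpenZFS implementation.
 */
#include <stddef.h>

typedef int boolean_t;
#define	B_FALSE	0
#define	B_TRUE	1

typedef struct {
	boolean_t (*is_supported)(void);
	int degree;
	const char *name;
} blake3_ops_t;

static boolean_t always_true(void)  { return (B_TRUE); }
static boolean_t always_false(void) { return (B_FALSE); }

static const blake3_ops_t blake3_generic_impl = {
	.is_supported = always_true,  .degree = 1, .name = "generic"
};
static const blake3_ops_t blake3_fake_sse2_impl = {
	.is_supported = always_false, .degree = 4, .name = "fakesse2"
};

/* Pick the first usable implementation; always-false stubs are skipped. */
static const blake3_ops_t *
select_blake3_ops(void)
{
	static const blake3_ops_t *const candidates[] = {
		&blake3_fake_sse2_impl, &blake3_generic_impl
	};
	size_t i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		if (candidates[i]->is_supported())
			return (candidates[i]);
	return (NULL);
}

With this shape, the loader build can keep blake3_impl.c unmodified: the fake entries compile and link, and selection always falls through to the generic implementation.
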
diff --git a/sys/conf/files b/sys/conf/files
index 725e140dca19..b7da3626111d 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,5231 +1,5231 @@
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
acpi_quirks.h optional acpi \
dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
no-obj no-implicit-rule before-depend \
clean "acpi_quirks.h"
bhnd_nvram_map.h optional bhnd \
dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \
no-obj no-implicit-rule before-depend \
clean "bhnd_nvram_map.h"
bhnd_nvram_map_data.h optional bhnd \
dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \
no-obj no-implicit-rule before-depend \
clean "bhnd_nvram_map_data.h"
fdt_static_dtb.h optional fdt fdt_dtb_static \
compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ${.CURDIR}'" \
dependency "${FDT_DTS_FILE:T:R}.dtb" \
no-obj no-implicit-rule before-depend \
clean "fdt_static_dtb.h"
feeder_eq_gen.h optional sound \
dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \
compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \
no-obj no-implicit-rule before-depend \
clean "feeder_eq_gen.h"
feeder_rate_gen.h optional sound \
dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \
compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \
no-obj no-implicit-rule before-depend \
clean "feeder_rate_gen.h"
font.h optional sc_dflt_font \
compile-with "uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \
no-obj no-implicit-rule before-depend \
clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8"
snd_fxdiv_gen.h optional sound \
dependency "$S/tools/sound/snd_fxdiv_gen.awk" \
compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \
no-obj no-implicit-rule before-depend \
clean "snd_fxdiv_gen.h"
miidevs.h optional miibus | mii \
dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
no-obj no-implicit-rule before-depend \
clean "miidevs.h"
kbdmuxmap.h optional kbdmux_dflt_keymap \
compile-with "${KEYMAP} -L ${KBDMUX_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \
no-obj no-implicit-rule before-depend \
clean "kbdmuxmap.h"
teken_state.h optional sc | vt \
dependency "$S/teken/gensequences $S/teken/sequences" \
compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \
no-obj no-implicit-rule before-depend \
clean "teken_state.h"
ukbdmap.h optional ukbd_dflt_keymap \
compile-with "${KEYMAP} -L ${UKBD_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \
no-obj no-implicit-rule before-depend \
clean "ukbdmap.h"
usbdevs.h optional usb | hid \
dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \
compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \
no-obj no-implicit-rule before-depend \
clean "usbdevs.h"
usbdevs_data.h optional usb \
dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \
compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \
no-obj no-implicit-rule before-depend \
clean "usbdevs_data.h"
sdiodevs.h optional mmccam \
dependency "$S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs" \
compile-with "${AWK} -f $S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs -h" \
no-obj no-implicit-rule before-depend \
clean "sdiodevs.h"
sdiodevs_data.h optional mmccam \
dependency "$S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs" \
compile-with "${AWK} -f $S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs -d" \
no-obj no-implicit-rule before-depend \
clean "sdiodevs_data.h"
cam/cam.c optional scbus
cam/cam_compat.c optional scbus
cam/cam_iosched.c optional scbus
cam/cam_periph.c optional scbus
cam/cam_queue.c optional scbus
cam/cam_sim.c optional scbus
cam/cam_xpt.c optional scbus
cam/ata/ata_all.c optional scbus
cam/ata/ata_xpt.c optional scbus
cam/ata/ata_pmp.c optional scbus
cam/nvme/nvme_all.c optional scbus
cam/nvme/nvme_da.c optional nda | da
cam/nvme/nvme_xpt.c optional scbus
cam/scsi/scsi_xpt.c optional scbus
cam/scsi/scsi_all.c optional scbus
cam/scsi/scsi_cd.c optional cd
cam/scsi/scsi_ch.c optional ch
cam/ata/ata_da.c optional ada | da
cam/ctl/ctl.c optional ctl
cam/ctl/ctl_backend.c optional ctl
cam/ctl/ctl_backend_block.c optional ctl
cam/ctl/ctl_backend_ramdisk.c optional ctl
cam/ctl/ctl_cmd_table.c optional ctl
cam/ctl/ctl_frontend.c optional ctl
cam/ctl/ctl_frontend_cam_sim.c optional ctl
cam/ctl/ctl_frontend_ioctl.c optional ctl
cam/ctl/ctl_frontend_iscsi.c optional ctl cfiscsi
cam/ctl/ctl_ha.c optional ctl
cam/ctl/ctl_scsi_all.c optional ctl
cam/ctl/ctl_tpc.c optional ctl
cam/ctl/ctl_tpc_local.c optional ctl
cam/ctl/ctl_error.c optional ctl
cam/ctl/ctl_util.c optional ctl
cam/ctl/scsi_ctl.c optional ctl
cam/mmc/mmc_xpt.c optional scbus mmccam
cam/mmc/mmc_sim.c optional scbus mmccam
cam/mmc/mmc_sim_if.m optional scbus mmccam
cam/mmc/mmc_da.c optional scbus mmccam da
cam/scsi/scsi_da.c optional da
cam/scsi/scsi_pass.c optional pass
cam/scsi/scsi_pt.c optional pt
cam/scsi/scsi_sa.c optional sa
cam/scsi/scsi_enc.c optional ses
cam/scsi/scsi_enc_ses.c optional ses
cam/scsi/scsi_enc_safte.c optional ses
cam/scsi/scsi_sg.c optional sg
cam/scsi/scsi_targ_bh.c optional targbh
cam/scsi/scsi_target.c optional targ
cam/scsi/smp_all.c optional scbus
# shared between zfs and dtrace
cddl/compat/opensolaris/kern/opensolaris.c optional dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_proc.c optional zfs | dtrace compile-with "${CDDL_C}"
contrib/openzfs/module/os/freebsd/spl/spl_misc.c optional zfs | dtrace compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_cmn_err.c optional zfs | dtrace compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_taskq.c optional zfs | dtrace compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_kmem.c optional zfs | dtrace compile-with "${ZFS_C}"
#zfs solaris portability layer
contrib/openzfs/module/os/freebsd/spl/acl_common.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/callb.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/list.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_acl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_dtrace.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_kstat.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_policy.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_string.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_sunddi.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_uio.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_vfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_vm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_zone.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_procfs_list.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_zlib.c optional zfs compile-with "${ZFS_C}"
# zfs specific
#zfs avl
contrib/openzfs/module/avl/avl.c optional zfs compile-with "${ZFS_C}"
# zfs lua support
contrib/openzfs/module/lua/lapi.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lauxlib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lbaselib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lcode.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lcompat.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lcorolib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lctype.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ldebug.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ldo.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lfunc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lgc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/llex.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lmem.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lobject.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lopcodes.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lparser.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lstate.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lstring.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lstrlib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ltable.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ltablib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ltm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lvm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lzio.c optional zfs compile-with "${ZFS_C}"
# zfs nvpair support
contrib/openzfs/module/nvpair/fnvpair.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/nvpair/nvpair.c optional zfs compile-with "${ZFS_RPC_C}"
contrib/openzfs/module/nvpair/nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/nvpair/nvpair_alloc_spl.c optional zfs compile-with "${ZFS_C}"
#zfs platform compatibility code
contrib/openzfs/module/os/freebsd/zfs/abd_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/arc_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/crypto_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/dmu_os.c optional zfs compile-with "${ZFS_C}"
+contrib/openzfs/module/os/freebsd/zfs/event_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/hkdf.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/kmod_core.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/spa_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c optional zfs compile-with "${ZFS_C} -include $S/modules/zfs/zfs_config.h"
contrib/openzfs/module/os/freebsd/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/vdev_label_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_ioctl_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zvol_os.c optional zfs compile-with "${ZFS_C}"
#zfs unicode support
contrib/openzfs/module/unicode/uconv.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}"
#zfs checksums / zcommon
contrib/openzfs/module/zcommon/cityhash.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfeature_common.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_comutil.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_deleg.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_superscalar.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_superscalar4.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_namecheck.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_prop.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zpool_prop.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zprop_common.c optional zfs compile-with "${ZFS_C}"
# zfs edon-r hash support
contrib/openzfs/module/icp/algs/edonr/edonr.c optional zfs compile-with "${ZFS_C}"
# zfs blake3 hash support
contrib/openzfs/module/icp/algs/blake3/blake3.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/blake3/blake3_generic.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/blake3/blake3_impl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/blake3/blake3_x86-64.c optional zfs compile-with "${ZFS_C}"
#zfs core common code
contrib/openzfs/module/zfs/abd.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/aggsum.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/arc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/blake3_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/blkptr.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bplist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bpobj.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bptree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/btree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bqueue.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dbuf.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dbuf_stats.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dataset_kstats.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/ddt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_recv.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_redact.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_tx.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dnode.c optional zfs compile-with "${ZFS_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" \
warning "kernel contains CDDL licensed ZFS filesystem"
contrib/openzfs/module/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_bookmark.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_crypt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_deadlist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_deleg.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_destroy.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_userhold.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/edonr_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/fm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/gzip.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/lzjb.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/lz4.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/lz4_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/metaslab.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/mmp.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/multilist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/objlist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/pathname.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/range_tree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/refcount.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/sa.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/sha256.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/skein_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa.c optional zfs compile-with "${ZFS_C}"
-contrib/openzfs/module/zfs/spa_boot.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_checkpoint.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_config.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_history.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_log_spacemap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_stats.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/space_map.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/space_reftree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/txg.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/uberblock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/unique.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_cache.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_draid.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_draid_rand.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_indirect.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_indirect_births.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_indirect_mapping.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_initialize.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_rebuild.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_removal.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_root.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_trim.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_get.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_global.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_iter.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_set.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_synctask.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfeature.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_chksum.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
contrib/openzfs/module/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_quota.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_ratelimit.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zstd/zfs_zstd.c optional zfs zstdio compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zil.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zle.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zrlock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zthr.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zvol.c optional zfs compile-with "${ZFS_C}"
# dtrace specific
cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \
warning "kernel contains CDDL licensed DTRACE"
cddl/contrib/opensolaris/uts/common/dtrace/dtrace_xoroshiro128_plus.c optional dtrace compile-with "${DTRACE_C}"
cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}"
cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}"
cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}"
cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
cddl/dev/systrace/systrace.c optional dtrace_systrace | dtraceall compile-with "${CDDL_C}"
cddl/dev/prototype.c optional dtrace_prototype | dtraceall compile-with "${CDDL_C}"
fs/nfsclient/nfs_clkdtrace.c optional dtnfscl nfscl | dtraceall nfscl compile-with "${CDDL_C}"
compat/freebsd32/freebsd32_abort2.c optional compat_freebsd32
compat/freebsd32/freebsd32_capability.c optional compat_freebsd32
compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32
compat/freebsd32/freebsd32_misc.c optional compat_freebsd32
compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32
compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32
contrib/ck/src/ck_array.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_centralized.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_combining.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_dissemination.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_mcs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_tournament.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_epoch.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_hp.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_hs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_ht.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_rhs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/dev/acpica/common/ahids.c optional acpi acpi_debug
contrib/dev/acpica/common/ahuuids.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbconvert.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbobject.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbtest.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmcstyle.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmdeferred.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug
contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi
contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi
contrib/dev/acpica/components/dispatcher/dsdebug.c optional acpi
contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi
contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi
contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi
contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi
contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi
contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi
contrib/dev/acpica/components/dispatcher/dspkginit.c optional acpi
contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi
contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi
contrib/dev/acpica/components/dispatcher/dswload.c optional acpi
contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi
contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi
contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi
contrib/dev/acpica/components/events/evevent.c optional acpi
contrib/dev/acpica/components/events/evglock.c optional acpi
contrib/dev/acpica/components/events/evgpe.c optional acpi
contrib/dev/acpica/components/events/evgpeblk.c optional acpi
contrib/dev/acpica/components/events/evgpeinit.c optional acpi
contrib/dev/acpica/components/events/evgpeutil.c optional acpi
contrib/dev/acpica/components/events/evhandler.c optional acpi
contrib/dev/acpica/components/events/evmisc.c optional acpi
contrib/dev/acpica/components/events/evregion.c optional acpi
contrib/dev/acpica/components/events/evrgnini.c optional acpi
contrib/dev/acpica/components/events/evsci.c optional acpi
contrib/dev/acpica/components/events/evxface.c optional acpi
contrib/dev/acpica/components/events/evxfevnt.c optional acpi
contrib/dev/acpica/components/events/evxfgpe.c optional acpi
contrib/dev/acpica/components/events/evxfregn.c optional acpi
contrib/dev/acpica/components/executer/exconcat.c optional acpi
contrib/dev/acpica/components/executer/exconfig.c optional acpi
contrib/dev/acpica/components/executer/exconvrt.c optional acpi
contrib/dev/acpica/components/executer/excreate.c optional acpi
contrib/dev/acpica/components/executer/exdebug.c optional acpi
contrib/dev/acpica/components/executer/exdump.c optional acpi
contrib/dev/acpica/components/executer/exfield.c optional acpi
contrib/dev/acpica/components/executer/exfldio.c optional acpi
contrib/dev/acpica/components/executer/exmisc.c optional acpi
contrib/dev/acpica/components/executer/exmutex.c optional acpi
contrib/dev/acpica/components/executer/exnames.c optional acpi
contrib/dev/acpica/components/executer/exoparg1.c optional acpi
contrib/dev/acpica/components/executer/exoparg2.c optional acpi
contrib/dev/acpica/components/executer/exoparg3.c optional acpi
contrib/dev/acpica/components/executer/exoparg6.c optional acpi
contrib/dev/acpica/components/executer/exprep.c optional acpi
contrib/dev/acpica/components/executer/exregion.c optional acpi
contrib/dev/acpica/components/executer/exresnte.c optional acpi
contrib/dev/acpica/components/executer/exresolv.c optional acpi
contrib/dev/acpica/components/executer/exresop.c optional acpi
contrib/dev/acpica/components/executer/exserial.c optional acpi
contrib/dev/acpica/components/executer/exstore.c optional acpi
contrib/dev/acpica/components/executer/exstoren.c optional acpi
contrib/dev/acpica/components/executer/exstorob.c optional acpi
contrib/dev/acpica/components/executer/exsystem.c optional acpi
contrib/dev/acpica/components/executer/extrace.c optional acpi
contrib/dev/acpica/components/executer/exutils.c optional acpi
contrib/dev/acpica/components/hardware/hwacpi.c optional acpi
contrib/dev/acpica/components/hardware/hwesleep.c optional acpi
contrib/dev/acpica/components/hardware/hwgpe.c optional acpi
contrib/dev/acpica/components/hardware/hwpci.c optional acpi
contrib/dev/acpica/components/hardware/hwregs.c optional acpi
contrib/dev/acpica/components/hardware/hwsleep.c optional acpi
contrib/dev/acpica/components/hardware/hwtimer.c optional acpi
contrib/dev/acpica/components/hardware/hwvalid.c optional acpi
contrib/dev/acpica/components/hardware/hwxface.c optional acpi
contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi
contrib/dev/acpica/components/namespace/nsaccess.c optional acpi \
compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
contrib/dev/acpica/components/namespace/nsalloc.c optional acpi
contrib/dev/acpica/components/namespace/nsarguments.c optional acpi
contrib/dev/acpica/components/namespace/nsconvert.c optional acpi
contrib/dev/acpica/components/namespace/nsdump.c optional acpi
contrib/dev/acpica/components/namespace/nseval.c optional acpi
contrib/dev/acpica/components/namespace/nsinit.c optional acpi
contrib/dev/acpica/components/namespace/nsload.c optional acpi
contrib/dev/acpica/components/namespace/nsnames.c optional acpi
contrib/dev/acpica/components/namespace/nsobject.c optional acpi
contrib/dev/acpica/components/namespace/nsparse.c optional acpi
contrib/dev/acpica/components/namespace/nspredef.c optional acpi
contrib/dev/acpica/components/namespace/nsprepkg.c optional acpi
contrib/dev/acpica/components/namespace/nsrepair.c optional acpi
contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi
contrib/dev/acpica/components/namespace/nssearch.c optional acpi
contrib/dev/acpica/components/namespace/nsutils.c optional acpi
contrib/dev/acpica/components/namespace/nswalk.c optional acpi
contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi
contrib/dev/acpica/components/namespace/nsxfname.c optional acpi
contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi
contrib/dev/acpica/components/parser/psargs.c optional acpi
contrib/dev/acpica/components/parser/psloop.c optional acpi
contrib/dev/acpica/components/parser/psobject.c optional acpi
contrib/dev/acpica/components/parser/psopcode.c optional acpi
contrib/dev/acpica/components/parser/psopinfo.c optional acpi
contrib/dev/acpica/components/parser/psparse.c optional acpi
contrib/dev/acpica/components/parser/psscope.c optional acpi
contrib/dev/acpica/components/parser/pstree.c optional acpi
contrib/dev/acpica/components/parser/psutils.c optional acpi
contrib/dev/acpica/components/parser/pswalk.c optional acpi
contrib/dev/acpica/components/parser/psxface.c optional acpi
contrib/dev/acpica/components/resources/rsaddr.c optional acpi
contrib/dev/acpica/components/resources/rscalc.c optional acpi
contrib/dev/acpica/components/resources/rscreate.c optional acpi
contrib/dev/acpica/components/resources/rsdump.c optional acpi acpi_debug
contrib/dev/acpica/components/resources/rsdumpinfo.c optional acpi
contrib/dev/acpica/components/resources/rsinfo.c optional acpi
contrib/dev/acpica/components/resources/rsio.c optional acpi
contrib/dev/acpica/components/resources/rsirq.c optional acpi
contrib/dev/acpica/components/resources/rslist.c optional acpi
contrib/dev/acpica/components/resources/rsmemory.c optional acpi
contrib/dev/acpica/components/resources/rsmisc.c optional acpi
contrib/dev/acpica/components/resources/rsserial.c optional acpi
contrib/dev/acpica/components/resources/rsutils.c optional acpi
contrib/dev/acpica/components/resources/rsxface.c optional acpi
contrib/dev/acpica/components/tables/tbdata.c optional acpi
contrib/dev/acpica/components/tables/tbfadt.c optional acpi
contrib/dev/acpica/components/tables/tbfind.c optional acpi
contrib/dev/acpica/components/tables/tbinstal.c optional acpi
contrib/dev/acpica/components/tables/tbprint.c optional acpi
contrib/dev/acpica/components/tables/tbutils.c optional acpi
contrib/dev/acpica/components/tables/tbxface.c optional acpi
contrib/dev/acpica/components/tables/tbxfload.c optional acpi
contrib/dev/acpica/components/tables/tbxfroot.c optional acpi
contrib/dev/acpica/components/utilities/utaddress.c optional acpi
contrib/dev/acpica/components/utilities/utalloc.c optional acpi
contrib/dev/acpica/components/utilities/utascii.c optional acpi
contrib/dev/acpica/components/utilities/utbuffer.c optional acpi
contrib/dev/acpica/components/utilities/utcache.c optional acpi
contrib/dev/acpica/components/utilities/utcopy.c optional acpi
contrib/dev/acpica/components/utilities/utdebug.c optional acpi
contrib/dev/acpica/components/utilities/utdecode.c optional acpi
contrib/dev/acpica/components/utilities/utdelete.c optional acpi
contrib/dev/acpica/components/utilities/uterror.c optional acpi
contrib/dev/acpica/components/utilities/uteval.c optional acpi
contrib/dev/acpica/components/utilities/utexcep.c optional acpi
contrib/dev/acpica/components/utilities/utglobal.c optional acpi
contrib/dev/acpica/components/utilities/uthex.c optional acpi
contrib/dev/acpica/components/utilities/utids.c optional acpi
contrib/dev/acpica/components/utilities/utinit.c optional acpi
contrib/dev/acpica/components/utilities/utlock.c optional acpi
contrib/dev/acpica/components/utilities/utmath.c optional acpi
contrib/dev/acpica/components/utilities/utmisc.c optional acpi
contrib/dev/acpica/components/utilities/utmutex.c optional acpi
contrib/dev/acpica/components/utilities/utnonansi.c optional acpi
contrib/dev/acpica/components/utilities/utobject.c optional acpi
contrib/dev/acpica/components/utilities/utosi.c optional acpi
contrib/dev/acpica/components/utilities/utownerid.c optional acpi
contrib/dev/acpica/components/utilities/utpredef.c optional acpi
contrib/dev/acpica/components/utilities/utresdecode.c optional acpi acpi_debug
contrib/dev/acpica/components/utilities/utresrc.c optional acpi
contrib/dev/acpica/components/utilities/utstate.c optional acpi
contrib/dev/acpica/components/utilities/utstring.c optional acpi
contrib/dev/acpica/components/utilities/utstrsuppt.c optional acpi
contrib/dev/acpica/components/utilities/utstrtoul64.c optional acpi
contrib/dev/acpica/components/utilities/utuuid.c optional acpi acpi_debug
contrib/dev/acpica/components/utilities/utxface.c optional acpi
contrib/dev/acpica/components/utilities/utxferror.c optional acpi
contrib/dev/acpica/components/utilities/utxfinit.c optional acpi
contrib/dev/acpica/os_specific/service_layers/osgendbg.c optional acpi acpi_debug
netpfil/ipfilter/netinet/fil.c optional ipfilter inet \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_auth.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_frag.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_log.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_nat.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_proxy.c optional ipfilter inet \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_state.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_lookup.c optional ipfilter inet \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -Wno-error -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_pool.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_htable.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter ${NO_WTAUTOLOGICAL_POINTER_COMPARE}"
netpfil/ipfilter/netinet/ip_sync.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_nat6.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_rules.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_scan.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_dstlist.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/radix_ipf.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
contrib/libfdt/fdt.c optional fdt
contrib/libfdt/fdt_ro.c optional fdt
contrib/libfdt/fdt_rw.c optional fdt
contrib/libfdt/fdt_strerror.c optional fdt
contrib/libfdt/fdt_sw.c optional fdt
contrib/libfdt/fdt_wip.c optional fdt
contrib/libnv/cnvlist.c standard
contrib/libnv/dnvlist.c standard
contrib/libnv/nvlist.c standard
contrib/libnv/bsd_nvpair.c standard
contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \
compile-with "${NORMAL_C_NOWERROR} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/misc/straddr.c optional ngatm_atmbase \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/saal/saal_sscop.c optional ngatm_sscop \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
# xz
dev/xz/xz_mod.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_crc32.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_crc64.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
# Zstd
contrib/zstd/lib/freebsd/zstd_kmalloc.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/zstd_common.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/fse_decompress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/entropy_common.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/error_private.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/xxhash.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress_literals.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress_sequences.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress_superblock.c optional zstdio compile-with "${ZSTD_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
contrib/zstd/lib/compress/fse_compress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/hist.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/huf_compress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_double_fast.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_fast.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_lazy.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_ldm.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_opt.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/decompress/zstd_ddict.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/decompress/zstd_decompress.c optional zstdio compile-with ${ZSTD_C}
# See comment in sys/conf/kern.pre.mk
contrib/zstd/lib/decompress/zstd_decompress_block.c optional zstdio \
compile-with "${ZSTD_C} ${ZSTD_DECOMPRESS_BLOCK_FLAGS}"
contrib/zstd/lib/decompress/huf_decompress.c optional zstdio compile-with "${ZSTD_C} ${NO_WBITWISE_INSTEAD_OF_LOGICAL}"
# Blake 2
contrib/libb2/blake2b-ref.c optional crypto | !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function"
contrib/libb2/blake2s-ref.c optional crypto \
compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function"
crypto/blake2/blake2-sw.c optional crypto \
compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual"
crypto/camellia/camellia.c optional crypto
crypto/camellia/camellia-api.c optional crypto
crypto/chacha20/chacha.c standard
crypto/chacha20/chacha-sw.c optional crypto
crypto/chacha20_poly1305.c optional crypto
crypto/curve25519.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
crypto/des/des_ecb.c optional netsmb
crypto/des/des_setkey.c optional netsmb
crypto/openssl/ossl.c optional ossl
crypto/openssl/ossl_aes.c optional ossl
crypto/openssl/ossl_chacha20.c optional ossl
crypto/openssl/ossl_poly1305.c optional ossl
crypto/openssl/ossl_sha1.c optional ossl
crypto/openssl/ossl_sha256.c optional ossl
crypto/openssl/ossl_sha512.c optional ossl
crypto/rc4/rc4.c optional netgraph_mppc_encryption
crypto/rijndael/rijndael-alg-fst.c optional crypto | ekcd | geom_bde | \
!random_loadable | wlan_ccmp
crypto/rijndael/rijndael-api-fst.c optional ekcd | geom_bde | !random_loadable
crypto/rijndael/rijndael-api.c optional crypto | wlan_ccmp
crypto/sha1.c optional carp | crypto | ether | \
netgraph_mppc_encryption | sctp
crypto/sha2/sha256c.c optional crypto | ekcd | geom_bde | \
!random_loadable | sctp | zfs
crypto/sha2/sha512c.c optional crypto | geom_bde | zfs
crypto/skein/skein.c optional crypto | zfs
crypto/skein/skein_block.c optional crypto | zfs
crypto/siphash/siphash.c optional inet | inet6
crypto/siphash/siphash_test.c optional inet | inet6
ddb/db_access.c optional ddb
ddb/db_break.c optional ddb
ddb/db_capture.c optional ddb
ddb/db_command.c optional ddb
ddb/db_examine.c optional ddb
ddb/db_expr.c optional ddb
ddb/db_input.c optional ddb
ddb/db_lex.c optional ddb
ddb/db_main.c optional ddb
ddb/db_output.c optional ddb
ddb/db_print.c optional ddb
ddb/db_ps.c optional ddb
ddb/db_run.c optional ddb
ddb/db_script.c optional ddb
ddb/db_sym.c optional ddb
ddb/db_thread.c optional ddb
ddb/db_textdump.c optional ddb
ddb/db_variables.c optional ddb
ddb/db_watch.c optional ddb
ddb/db_write_cmd.c optional ddb
dev/aac/aac.c optional aac
dev/aac/aac_cam.c optional aacp aac
dev/aac/aac_debug.c optional aac
dev/aac/aac_disk.c optional aac
dev/aac/aac_pci.c optional aac pci
dev/aacraid/aacraid.c optional aacraid
dev/aacraid/aacraid_cam.c optional aacraid scbus
dev/aacraid/aacraid_debug.c optional aacraid
dev/aacraid/aacraid_pci.c optional aacraid pci
dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi
dev/acpi_support/acpi_asus.c optional acpi_asus acpi
dev/acpi_support/acpi_asus_wmi.c optional acpi_asus_wmi acpi
dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi
dev/acpi_support/acpi_hp.c optional acpi_hp acpi
dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi
dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi
dev/acpi_support/acpi_sony.c optional acpi_sony acpi
dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi
dev/acpi_support/atk0110.c optional aibs acpi
dev/acpica/Osd/OsdDebug.c optional acpi
dev/acpica/Osd/OsdHardware.c optional acpi
dev/acpica/Osd/OsdInterrupt.c optional acpi
dev/acpica/Osd/OsdMemory.c optional acpi
dev/acpica/Osd/OsdSchedule.c optional acpi
dev/acpica/Osd/OsdStream.c optional acpi
dev/acpica/Osd/OsdSynch.c optional acpi
dev/acpica/Osd/OsdTable.c optional acpi
dev/acpica/acpi.c optional acpi
dev/acpica/acpi_acad.c optional acpi
dev/acpica/acpi_apei.c optional acpi
dev/acpica/acpi_battery.c optional acpi
dev/acpica/acpi_button.c optional acpi
dev/acpica/acpi_cmbat.c optional acpi
dev/acpica/acpi_cpu.c optional acpi
dev/acpica/acpi_ec.c optional acpi
dev/acpica/acpi_isab.c optional acpi isa
dev/acpica/acpi_lid.c optional acpi
dev/acpica/acpi_package.c optional acpi
dev/acpica/acpi_perf.c optional acpi
dev/acpica/acpi_powerres.c optional acpi
dev/acpica/acpi_quirk.c optional acpi
dev/acpica/acpi_resource.c optional acpi
dev/acpica/acpi_container.c optional acpi
dev/acpica/acpi_smbat.c optional acpi
dev/acpica/acpi_thermal.c optional acpi
dev/acpica/acpi_throttle.c optional acpi
dev/acpica/acpi_video.c optional acpi_video acpi
dev/acpica/acpi_dock.c optional acpi_dock acpi
dev/adlink/adlink.c optional adlink
dev/ae/if_ae.c optional ae pci
dev/age/if_age.c optional age pci
dev/agp/agp.c optional agp pci
dev/agp/agp_if.m optional agp pci
dev/ahci/ahci.c optional ahci
dev/ahci/ahciem.c optional ahci
dev/ahci/ahci_pci.c optional ahci pci
dev/aic7xxx/ahc_isa.c optional ahc isa
dev/aic7xxx/ahc_pci.c optional ahc pci \
compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}"
dev/aic7xxx/ahd_pci.c optional ahd pci \
compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}"
dev/aic7xxx/aic7770.c optional ahc
dev/aic7xxx/aic79xx.c optional ahd pci
dev/aic7xxx/aic79xx_osm.c optional ahd pci
dev/aic7xxx/aic79xx_pci.c optional ahd pci
dev/aic7xxx/aic79xx_reg_print.c optional ahd pci ahd_reg_pretty_print
dev/aic7xxx/aic7xxx.c optional ahc
dev/aic7xxx/aic7xxx_93cx6.c optional ahc
dev/aic7xxx/aic7xxx_osm.c optional ahc
dev/aic7xxx/aic7xxx_pci.c optional ahc pci
dev/aic7xxx/aic7xxx_reg_print.c optional ahc ahc_reg_pretty_print
dev/al_eth/al_eth.c optional al_eth fdt \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
dev/al_eth/al_init_eth_lm.c optional al_eth fdt \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
dev/al_eth/al_init_eth_kr.c optional al_eth fdt \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_iofic.c optional al_iofic \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_config.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_main.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_serdes.c optional al_serdes \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
dev/alc/if_alc.c optional alc pci
dev/ale/if_ale.c optional ale pci
dev/alpm/alpm.c optional alpm pci
dev/altera/avgen/altera_avgen.c optional altera_avgen
dev/altera/avgen/altera_avgen_fdt.c optional altera_avgen fdt
dev/altera/avgen/altera_avgen_nexus.c optional altera_avgen
dev/altera/msgdma/msgdma.c optional altera_msgdma xdma
dev/altera/sdcard/altera_sdcard.c optional altera_sdcard
dev/altera/sdcard/altera_sdcard_disk.c optional altera_sdcard
dev/altera/sdcard/altera_sdcard_io.c optional altera_sdcard
dev/altera/sdcard/altera_sdcard_fdt.c optional altera_sdcard fdt
dev/altera/sdcard/altera_sdcard_nexus.c optional altera_sdcard
dev/altera/softdma/softdma.c optional altera_softdma xdma fdt
dev/altera/pio/pio.c optional altera_pio
dev/altera/pio/pio_if.m optional altera_pio
dev/amdpm/amdpm.c optional amdpm pci | nfpm pci
dev/amdsmb/amdsmb.c optional amdsmb pci
#
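# Note on "optional" clauses in this file: space-separated options must all
# be configured (logical AND) while "|" introduces an alternative, so an
# entry such as "ata pci | atapci" is built either for the in-kernel ata(4)
# driver on PCI systems or when the atapci option is selected on its own.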
dev/ata/ata_if.m optional ata | atacore
dev/ata/ata-all.c optional ata | atacore
dev/ata/ata-dma.c optional ata | atacore
dev/ata/ata-lowlevel.c optional ata | atacore
dev/ata/ata-sata.c optional ata | atacore
dev/ata/ata-isa.c optional ata isa | ataisa
dev/ata/ata-pci.c optional ata pci | atapci
dev/ata/chipsets/ata-acard.c optional ata pci | ataacard
dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs
dev/ata/chipsets/ata-amd.c optional ata pci | ataamd
dev/ata/chipsets/ata-ati.c optional ata pci | ataati
dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek
dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress
dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix
dev/ata/chipsets/ata-highpoint.c optional ata pci | atahighpoint
dev/ata/chipsets/ata-intel.c optional ata pci | ataintel
dev/ata/chipsets/ata-ite.c optional ata pci | ataite
dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron
dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell
dev/ata/chipsets/ata-micron.c optional ata pci | atamicron
dev/ata/chipsets/ata-national.c optional ata pci | atanational
dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell
dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia
dev/ata/chipsets/ata-promise.c optional ata pci | atapromise
dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks
dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati
dev/ata/chipsets/ata-sis.c optional ata pci | atasis
dev/ata/chipsets/ata-via.c optional ata pci | atavia
#
dev/ath/if_ath_pci.c optional ath_pci pci \
compile-with "${ATH_C}"
#
dev/ath/if_ath_ahb.c optional ath_ahb \
compile-with "${ATH_C}"
#
dev/ath/if_ath.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_alq.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_beacon.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_btcoex.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_btcoex_mci.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_debug.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_descdma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_keycache.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_ioctl.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_led.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_lna_div.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tx.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tx_edma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tx_ht.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tdma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_sysctl.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_rx.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_rx_edma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_spectral.c optional ath \
compile-with "${ATH_C}"
dev/ath/ah_osdep.c optional ath \
compile-with "${ATH_C}"
#
dev/ath/ath_hal/ah.c optional ath \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v14.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v4k.c \
optional ath_hal | ath_ar9285 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_9287.c \
optional ath_hal | ath_ar9287 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_regdomain.c optional ath \
compile-with "${ATH_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW}"
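# Each chip family below is compiled either into the full HAL (ath_hal) or
# only when its individual ath_arNNNN option is selected, with the ath_hal
# headers added to the include path.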
# ar5210
dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar5211
dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_reset.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar5212
dev/ath/ath_hal/ar5212/ar5212_ani.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_attach.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_beacon.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_eeprom.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_gpio.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_interrupts.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_keycache.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_misc.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_phy.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_power.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_recv.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_reset.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_rfgain.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_xmit.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar5416 (depends on ar5212)
dev/ath/ath_hal/ar5416/ar5416_ani.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_attach.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_beacon.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_btcoex.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_eeprom.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_gpio.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_interrupts.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_keycache.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_misc.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_phy.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_power.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_radar.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_recv.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_reset.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_spectral.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_xmit.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9130 (depends upon ar5416) - also requires AH_SUPPORT_AR9130
#
# Since this is an embedded MAC SoC, there's no need to compile it into the
# default HAL.
dev/ath/ath_hal/ar9001/ar9130_attach.c optional ath_ar9130 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9001/ar9130_phy.c optional ath_ar9130 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9001/ar9130_eeprom.c optional ath_ar9130 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9160 (depends on ar5416)
dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9280 (depends on ar5416)
dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \
ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \
ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9285 (depends on ar5416 and ar9280)
dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_btcoex.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9287 (depends on ar5416)
dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9300
contrib/dev/ath/ath_hal/ar9300/ar9300_ani.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_attach.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_beacon.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_eeprom.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WCONSTANT_CONVERSION}"
contrib/dev/ath/ath_hal/ar9300/ar9300_freebsd.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_gpio.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_interrupts.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_keycache.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_mci.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_misc.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_paprd.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_phy.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_power.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_radar.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_radio.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_recv.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_recv_ds.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_reset.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WSOMETIMES_UNINITIALIZED} -Wno-unused-function"
contrib/dev/ath/ath_hal/ar9300/ar9300_stub.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_stub_funcs.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_spectral.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_timer.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_xmit_ds.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
# rf backends
dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \
ath_ar9130 | ath_ar9160 | ath_ar9280 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ath rate control algorithms
dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr \
compile-with "${ATH_C}"
dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \
compile-with "${ATH_C}"
dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \
compile-with "${ATH_C}"
# ath DFS modules
dev/ath/ath_dfs/null/dfs_null.c optional ath \
compile-with "${ATH_C}"
#
dev/backlight/backlight_if.m optional backlight | compat_linuxkpi
dev/backlight/backlight.c optional backlight | compat_linuxkpi
dev/bce/if_bce.c optional bce
dev/bfe/if_bfe.c optional bfe
dev/bge/if_bge.c optional bge
dev/bhnd/bhnd.c optional bhnd
dev/bhnd/bhnd_erom.c optional bhnd
dev/bhnd/bhnd_erom_if.m optional bhnd
dev/bhnd/bhnd_subr.c optional bhnd
dev/bhnd/bhnd_bus_if.m optional bhnd
dev/bhnd/bhndb/bhnd_bhndb.c optional bhndb bhnd
dev/bhnd/bhndb/bhndb.c optional bhndb bhnd
dev/bhnd/bhndb/bhndb_bus_if.m optional bhndb bhnd
dev/bhnd/bhndb/bhndb_hwdata.c optional bhndb bhnd
dev/bhnd/bhndb/bhndb_if.m optional bhndb bhnd
dev/bhnd/bhndb/bhndb_pci.c optional bhndb_pci bhndb bhnd pci
dev/bhnd/bhndb/bhndb_pci_hwdata.c optional bhndb_pci bhndb bhnd pci
dev/bhnd/bhndb/bhndb_pci_sprom.c optional bhndb_pci bhndb bhnd pci
dev/bhnd/bhndb/bhndb_subr.c optional bhndb bhnd
dev/bhnd/bcma/bcma.c optional bcma bhnd
dev/bhnd/bcma/bcma_bhndb.c optional bcma bhnd bhndb
dev/bhnd/bcma/bcma_erom.c optional bcma bhnd
dev/bhnd/bcma/bcma_subr.c optional bcma bhnd
dev/bhnd/cores/chipc/bhnd_chipc_if.m optional bhnd
dev/bhnd/cores/chipc/bhnd_sprom_chipc.c optional bhnd
dev/bhnd/cores/chipc/bhnd_pmu_chipc.c optional bhnd
dev/bhnd/cores/chipc/chipc.c optional bhnd
dev/bhnd/cores/chipc/chipc_cfi.c optional bhnd cfi
dev/bhnd/cores/chipc/chipc_gpio.c optional bhnd gpio
dev/bhnd/cores/chipc/chipc_slicer.c optional bhnd cfi | bhnd spibus
dev/bhnd/cores/chipc/chipc_spi.c optional bhnd spibus
dev/bhnd/cores/chipc/chipc_subr.c optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.c optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_if.m optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_hostb_if.m optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_subr.c optional bhnd
dev/bhnd/cores/pci/bhnd_pci.c optional bhnd pci
dev/bhnd/cores/pci/bhnd_pci_hostb.c optional bhndb bhnd pci
dev/bhnd/cores/pci/bhnd_pcib.c optional bhnd_pcib bhnd pci
dev/bhnd/cores/pcie2/bhnd_pcie2.c optional bhnd pci
dev/bhnd/cores/pcie2/bhnd_pcie2_hostb.c optional bhndb bhnd pci
dev/bhnd/cores/pcie2/bhnd_pcie2b.c optional bhnd_pcie2b bhnd pci
dev/bhnd/cores/pmu/bhnd_pmu.c optional bhnd
dev/bhnd/cores/pmu/bhnd_pmu_core.c optional bhnd
dev/bhnd/cores/pmu/bhnd_pmu_if.m optional bhnd
dev/bhnd/cores/pmu/bhnd_pmu_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_bcm.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_bcmraw.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_btxt.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_sprom.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_sprom_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_tlv.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_if.m optional bhnd
dev/bhnd/nvram/bhnd_nvram_io.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_iobuf.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_ioptr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_iores.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_plist.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_store.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_store_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value_fmts.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value_prf.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value_subr.c optional bhnd
dev/bhnd/nvram/bhnd_sprom.c optional bhnd
dev/bhnd/siba/siba.c optional siba bhnd
dev/bhnd/siba/siba_bhndb.c optional siba bhnd bhndb
dev/bhnd/siba/siba_erom.c optional siba bhnd
dev/bhnd/siba/siba_subr.c optional siba bhnd
#
dev/bnxt/bnxt_hwrm.c optional bnxt iflib pci
dev/bnxt/bnxt_sysctl.c optional bnxt iflib pci
dev/bnxt/bnxt_txrx.c optional bnxt iflib pci
dev/bnxt/if_bnxt.c optional bnxt iflib pci
dev/bwi/bwimac.c optional bwi
dev/bwi/bwiphy.c optional bwi
dev/bwi/bwirf.c optional bwi
dev/bwi/if_bwi.c optional bwi
dev/bwi/if_bwi_pci.c optional bwi pci
dev/bwn/if_bwn.c optional bwn bhnd
dev/bwn/if_bwn_pci.c optional bwn pci bhnd bhndb bhndb_pci
dev/bwn/if_bwn_phy_common.c optional bwn bhnd
dev/bwn/if_bwn_phy_g.c optional bwn bhnd
dev/bwn/if_bwn_phy_lp.c optional bwn bhnd
dev/bwn/if_bwn_phy_n.c optional bwn bhnd
dev/bwn/if_bwn_util.c optional bwn bhnd
dev/cadence/if_cgem.c optional cgem fdt
dev/cardbus/card_if.m standard
dev/cardbus/cardbus.c optional cardbus
dev/cardbus/cardbus_cis.c optional cardbus
dev/cardbus/cardbus_device.c optional cardbus
dev/cardbus/power_if.m standard
dev/cas/if_cas.c optional cas
dev/cfi/cfi_bus_fdt.c optional cfi fdt
dev/cfi/cfi_bus_nexus.c optional cfi
dev/cfi/cfi_core.c optional cfi
dev/cfi/cfi_dev.c optional cfi
dev/cfi/cfi_disk.c optional cfid
dev/chromebook_platform/chromebook_platform.c optional chromebook_platform
dev/ciss/ciss.c optional ciss
dev/cpufreq/ichss.c optional cpufreq pci
dev/cxgb/cxgb_main.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_sge.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_mc5.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_ael1002.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_aq100x.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_xgmac.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_tn1010.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/sys/uipc_mvec.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgbe/t4_clip.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_filter.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_if.m optional cxgbe pci
dev/cxgbe/t4_iov.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_mp_ring.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_main.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_netmap.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sched.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sge.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_smt.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_l2t.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_tracer.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_vf.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4_hw.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t6_kern_tls.c optional cxgbe pci kern_tls \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t4_keyctx.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_common.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_flash_utils.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_lib.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_wtp.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/fastlz.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/fastlz_api.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
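# Chelsio firmware and configuration blobs are embedded via the usual
# three-step pattern: tools/fw_stub.awk generates a stub .c that registers
# the images with firmware(9), each .fwo wraps a blob into a linkable object
# via ${NORMAL_FWO}, and each .fw target copies the raw image or config text
# out of dev/cxgbe/firmware/.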
t4fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "t4fw_cfg.c"
t4fw_cfg.fwo optional cxgbe \
dependency "t4fw_cfg.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t4fw_cfg.fwo"
t4fw_cfg.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t4fw_cfg.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t4fw_cfg.fw"
t4fw_cfg_uwire.fwo optional cxgbe \
dependency "t4fw_cfg_uwire.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t4fw_cfg_uwire.fwo"
t4fw_cfg_uwire.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t4fw_cfg_uwire.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t4fw_cfg_uwire.fw"
t4fw.fwo optional cxgbe \
dependency "t4fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t4fw.fwo"
t4fw.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t4fw-1.27.0.0.bin" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t4fw.fw"
t5fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t5fw_cfg.fw:t5fw_cfg t5fw_cfg_uwire.fw:t5fw_cfg_uwire t5fw.fw:t5fw -mt5fw_cfg -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "t5fw_cfg.c"
t5fw_cfg.fwo optional cxgbe \
dependency "t5fw_cfg.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t5fw_cfg.fwo"
t5fw_cfg.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t5fw_cfg.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t5fw_cfg.fw"
t5fw_cfg_uwire.fwo optional cxgbe \
dependency "t5fw_cfg_uwire.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t5fw_cfg_uwire.fwo"
t5fw_cfg_uwire.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t5fw_cfg_uwire.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t5fw_cfg_uwire.fw"
t5fw.fwo optional cxgbe \
dependency "t5fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t5fw.fwo"
t5fw.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t5fw-1.27.0.0.bin" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t5fw.fw"
t6fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t6fw_cfg.fw:t6fw_cfg t6fw_cfg_uwire.fw:t6fw_cfg_uwire t6fw.fw:t6fw -mt6fw_cfg -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "t6fw_cfg.c"
t6fw_cfg.fwo optional cxgbe \
dependency "t6fw_cfg.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t6fw_cfg.fwo"
t6fw_cfg.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t6fw_cfg.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw_cfg.fw"
t6fw_cfg_uwire.fwo optional cxgbe \
dependency "t6fw_cfg_uwire.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t6fw_cfg_uwire.fwo"
t6fw_cfg_uwire.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t6fw_cfg_uwire.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw_cfg_uwire.fw"
t6fw.fwo optional cxgbe \
dependency "t6fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t6fw.fwo"
t6fw.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t6fw-1.27.0.0.bin" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw.fw"
dev/cxgbe/crypto/t4_crypto.c optional ccr \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cyapa/cyapa.c optional cyapa iicbus
dev/dc/if_dc.c optional dc pci
dev/dc/dcphy.c optional dc pci
dev/dc/pnphy.c optional dc pci
dev/dcons/dcons.c optional dcons
dev/dcons/dcons_crom.c optional dcons_crom
dev/dcons/dcons_os.c optional dcons
dev/dialog/da9063/da9063_if.m optional da9063_pmic
dev/dialog/da9063/da9063_iic.c optional da9063_pmic iicbus fdt
dev/dialog/da9063/da9063_rtc.c optional da9063_rtc fdt
dev/dme/if_dme.c optional dme
dev/drm2/drm_agpsupport.c optional drm2
dev/drm2/drm_auth.c optional drm2
dev/drm2/drm_bufs.c optional drm2
dev/drm2/drm_buffer.c optional drm2
dev/drm2/drm_context.c optional drm2
dev/drm2/drm_crtc.c optional drm2
dev/drm2/drm_crtc_helper.c optional drm2
dev/drm2/drm_dma.c optional drm2
dev/drm2/drm_dp_helper.c optional drm2
dev/drm2/drm_dp_iic_helper.c optional drm2
dev/drm2/drm_drv.c optional drm2
dev/drm2/drm_edid.c optional drm2
dev/drm2/drm_fb_helper.c optional drm2
dev/drm2/drm_fops.c optional drm2
dev/drm2/drm_gem.c optional drm2
dev/drm2/drm_gem_names.c optional drm2
dev/drm2/drm_global.c optional drm2
dev/drm2/drm_hashtab.c optional drm2
dev/drm2/drm_ioctl.c optional drm2
dev/drm2/drm_irq.c optional drm2
dev/drm2/drm_linux_list_sort.c optional drm2
dev/drm2/drm_lock.c optional drm2
dev/drm2/drm_memory.c optional drm2
dev/drm2/drm_mm.c optional drm2
dev/drm2/drm_modes.c optional drm2
dev/drm2/drm_pci.c optional drm2
dev/drm2/drm_platform.c optional drm2
dev/drm2/drm_scatter.c optional drm2
dev/drm2/drm_stub.c optional drm2
dev/drm2/drm_sysctl.c optional drm2
dev/drm2/drm_vm.c optional drm2
dev/drm2/drm_os_freebsd.c optional drm2
dev/drm2/ttm/ttm_agp_backend.c optional drm2
dev/drm2/ttm/ttm_lock.c optional drm2
dev/drm2/ttm/ttm_object.c optional drm2
dev/drm2/ttm/ttm_tt.c optional drm2
dev/drm2/ttm/ttm_bo_util.c optional drm2
dev/drm2/ttm/ttm_bo.c optional drm2
dev/drm2/ttm/ttm_bo_manager.c optional drm2
dev/drm2/ttm/ttm_execbuf_util.c optional drm2
dev/drm2/ttm/ttm_memory.c optional drm2
dev/drm2/ttm/ttm_page_alloc.c optional drm2
dev/drm2/ttm/ttm_bo_vm.c optional drm2
dev/efidev/efidev.c optional efirt
dev/efidev/efirt.c optional efirt
dev/efidev/efirtc.c optional efirt
dev/e1000/if_em.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/em_txrx.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/igb_txrx.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_80003es2lan.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82540.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82541.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82542.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82543.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82571.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82575.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_ich8lan.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_i210.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_api.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_base.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_mac.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_manage.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_nvm.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_phy.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_vf.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_mbx.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_osdep.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/et/if_et.c optional et
dev/ena/ena.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_datapath.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_netmap.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_rss.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_sysctl.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
contrib/ena-com/ena_com.c optional ena
contrib/ena-com/ena_eth_com.c optional ena
dev/etherswitch/arswitch/arswitch.c optional arswitch
dev/etherswitch/arswitch/arswitch_reg.c optional arswitch
dev/etherswitch/arswitch/arswitch_phy.c optional arswitch
dev/etherswitch/arswitch/arswitch_8216.c optional arswitch
dev/etherswitch/arswitch/arswitch_8226.c optional arswitch
dev/etherswitch/arswitch/arswitch_8316.c optional arswitch
dev/etherswitch/arswitch/arswitch_8327.c optional arswitch
dev/etherswitch/arswitch/arswitch_7240.c optional arswitch
dev/etherswitch/arswitch/arswitch_9340.c optional arswitch
dev/etherswitch/arswitch/arswitch_vlans.c optional arswitch
dev/etherswitch/etherswitch.c optional etherswitch
dev/etherswitch/etherswitch_if.m optional etherswitch
dev/etherswitch/ip17x/ip17x.c optional ip17x
dev/etherswitch/ip17x/ip175c.c optional ip17x
dev/etherswitch/ip17x/ip175d.c optional ip17x
dev/etherswitch/ip17x/ip17x_phy.c optional ip17x
dev/etherswitch/ip17x/ip17x_vlans.c optional ip17x
dev/etherswitch/miiproxy.c optional miiproxy
dev/etherswitch/rtl8366/rtl8366rb.c optional rtl8366rb
dev/etherswitch/e6000sw/e6000sw.c optional e6000sw
dev/etherswitch/e6000sw/e6060sw.c optional e6060sw
dev/etherswitch/infineon/adm6996fc.c optional adm6996fc
dev/etherswitch/micrel/ksz8995ma.c optional ksz8995ma
dev/etherswitch/ukswitch/ukswitch.c optional ukswitch
dev/evdev/cdev.c optional evdev
dev/evdev/evdev.c optional evdev
dev/evdev/evdev_mt.c optional evdev
dev/evdev/evdev_utils.c optional evdev
dev/evdev/uinput.c optional evdev uinput
dev/exca/exca.c optional cbb
dev/extres/clk/clk.c optional clk fdt
dev/extres/clk/clkdev_if.m optional clk fdt
dev/extres/clk/clknode_if.m optional clk fdt
dev/extres/clk/clk_bus.c optional clk fdt
dev/extres/clk/clk_div.c optional clk fdt
dev/extres/clk/clk_fixed.c optional clk fdt
dev/extres/clk/clk_gate.c optional clk fdt
dev/extres/clk/clk_link.c optional clk fdt
dev/extres/clk/clk_mux.c optional clk fdt
dev/extres/phy/phy.c optional phy fdt
dev/extres/phy/phydev_if.m optional phy fdt
dev/extres/phy/phynode_if.m optional phy fdt
dev/extres/phy/phy_usb.c optional phy fdt
dev/extres/phy/phynode_usb_if.m optional phy fdt
dev/extres/hwreset/hwreset.c optional hwreset fdt
dev/extres/hwreset/hwreset_if.m optional hwreset fdt
dev/extres/nvmem/nvmem.c optional nvmem fdt
dev/extres/nvmem/nvmem_if.m optional nvmem fdt
dev/extres/regulator/regdev_if.m optional regulator fdt
dev/extres/regulator/regnode_if.m optional regulator fdt
dev/extres/regulator/regulator.c optional regulator fdt
dev/extres/regulator/regulator_bus.c optional regulator fdt
dev/extres/regulator/regulator_fixed.c optional regulator fdt
dev/extres/syscon/syscon.c optional syscon
dev/extres/syscon/syscon_generic.c optional syscon fdt
dev/extres/syscon/syscon_if.m optional syscon
dev/extres/syscon/syscon_power.c optional syscon syscon_power fdt
dev/fb/fbd.c optional fbd | vt
dev/fb/fb_if.m standard
dev/fb/splash.c optional sc splash
dev/fdt/fdt_clock.c optional fdt fdt_clock
dev/fdt/fdt_clock_if.m optional fdt fdt_clock
dev/fdt/fdt_common.c optional fdt
dev/fdt/fdt_pinctrl.c optional fdt fdt_pinctrl
dev/fdt/fdt_pinctrl_if.m optional fdt fdt_pinctrl
dev/fdt/fdt_slicer.c optional fdt cfi | fdt mx25l | fdt n25q | fdt at45d
dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static \
dependency "${FDT_DTS_FILE:T:R}.dtb"
dev/fdt/simplebus.c optional fdt
dev/fdt/simple_mfd.c optional syscon fdt
dev/filemon/filemon.c optional filemon
dev/firewire/firewire.c optional firewire
dev/firewire/fwcrom.c optional firewire
dev/firewire/fwdev.c optional firewire
dev/firewire/fwdma.c optional firewire
dev/firewire/fwmem.c optional firewire
dev/firewire/fwohci.c optional firewire
dev/firewire/fwohci_pci.c optional firewire pci
dev/firewire/if_fwe.c optional fwe
dev/firewire/if_fwip.c optional fwip
dev/firewire/sbp.c optional sbp
dev/firewire/sbp_targ.c optional sbp_targ
dev/flash/at45d.c optional at45d
dev/flash/cqspi.c optional cqspi fdt xdma
dev/flash/mx25l.c optional mx25l
dev/flash/n25q.c optional n25q fdt
dev/flash/qspi_if.m optional cqspi fdt | n25q fdt
dev/fxp/if_fxp.c optional fxp
dev/fxp/inphy.c optional fxp
dev/gem/if_gem.c optional gem
dev/gem/if_gem_pci.c optional gem pci
dev/goldfish/goldfish_rtc.c optional goldfish_rtc fdt
dev/gpio/dwgpio/dwgpio.c optional gpio dwgpio fdt
dev/gpio/dwgpio/dwgpio_bus.c optional gpio dwgpio fdt
dev/gpio/dwgpio/dwgpio_if.m optional gpio dwgpio fdt
dev/gpio/gpiobacklight.c optional gpiobacklight fdt
dev/gpio/gpiokeys.c optional gpiokeys fdt
dev/gpio/gpiokeys_codes.c optional gpiokeys fdt
dev/gpio/gpiobus.c optional gpio \
dependency "gpiobus_if.h"
dev/gpio/gpioc.c optional gpio \
dependency "gpio_if.h"
dev/gpio/gpioiic.c optional gpioiic
dev/gpio/gpioled.c optional gpioled !fdt
dev/gpio/gpioled_fdt.c optional gpioled fdt
dev/gpio/gpiomdio.c optional gpiomdio mii_bitbang
dev/gpio/gpiopower.c optional gpiopower fdt
dev/gpio/gpioregulator.c optional gpioregulator fdt
dev/gpio/gpiospi.c optional gpiospi
dev/gpio/gpioths.c optional gpioths
dev/gpio/gpio_if.m optional gpio
dev/gpio/gpiobus_if.m optional gpio
dev/gpio/gpiopps.c optional gpiopps fdt
dev/gpio/ofw_gpiobus.c optional fdt gpio
dev/hid/bcm5974.c optional bcm5974
dev/hid/hconf.c optional hconf
dev/hid/hcons.c optional hcons
dev/hid/hgame.c optional hgame
dev/hid/hid.c optional hid
dev/hid/hid_if.m optional hid
dev/hid/hidbus.c optional hidbus
dev/hid/hidmap.c optional hidmap
dev/hid/hidquirk.c optional hid
dev/hid/hidraw.c optional hidraw
dev/hid/hkbd.c optional hkbd
dev/hid/hms.c optional hms
dev/hid/hmt.c optional hmt hconf
dev/hid/hpen.c optional hpen
dev/hid/hsctrl.c optional hsctrl
dev/hid/ietp.c optional ietp
dev/hid/ps4dshock.c optional ps4dshock
dev/hid/xb360gp.c optional xb360gp
dev/hifn/hifn7751.c optional hifn
dev/hptiop/hptiop.c optional hptiop scbus
dev/hwpmc/hwpmc_logging.c optional hwpmc
dev/hwpmc/hwpmc_mod.c optional hwpmc
dev/hwpmc/hwpmc_soft.c optional hwpmc
dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus
dev/ichiic/ig4_iic.c optional ig4 iicbus
dev/ichiic/ig4_pci.c optional ig4 pci iicbus
dev/ichsmb/ichsmb.c optional ichsmb
dev/ichsmb/ichsmb_pci.c optional ichsmb pci
dev/ida/ida.c optional ida
dev/ida/ida_disk.c optional ida
dev/ida/ida_pci.c optional ida pci
dev/iicbus/acpi_iicbus.c optional acpi iicbus | acpi compat_linuxkpi
dev/iicbus/ad7418.c optional ad7418
dev/iicbus/ads111x.c optional ads111x
dev/iicbus/ds1307.c optional ds1307
dev/iicbus/ds13rtc.c optional ds13rtc | ds133x | ds1374
dev/iicbus/ds1672.c optional ds1672
dev/iicbus/ds3231.c optional ds3231
dev/iicbus/htu21.c optional htu21
dev/iicbus/icee.c optional icee
dev/iicbus/if_ic.c optional ic
dev/iicbus/iic.c optional iic
dev/iicbus/iic_recover_bus.c optional iicbus | compat_linuxkpi
dev/iicbus/iicbb.c optional iicbb | compat_linuxkpi
dev/iicbus/iicbb_if.m optional iicbb | compat_linuxkpi
dev/iicbus/iicbus.c optional iicbus | compat_linuxkpi
dev/iicbus/iicbus_if.m optional iicbus | compat_linuxkpi
dev/iicbus/iichid.c optional iichid acpi hid iicbus
dev/iicbus/iiconf.c optional iicbus | compat_linuxkpi
dev/iicbus/iicsmb.c optional iicsmb \
dependency "iicbus_if.h"
dev/iicbus/iicoc.c optional iicoc
dev/iicbus/iicoc_fdt.c optional iicoc fdt
dev/iicbus/iicoc_pci.c optional iicoc pci
dev/iicbus/isl12xx.c optional isl12xx
dev/iicbus/lm75.c optional lm75
dev/iicbus/max44009.c optional max44009
dev/iicbus/mux/iicmux.c optional iicmux
dev/iicbus/mux/iicmux_if.m optional iicmux
dev/iicbus/mux/iic_gpiomux.c optional iic_gpiomux fdt
dev/iicbus/mux/ltc430x.c optional ltc430x
dev/iicbus/mux/pca954x.c optional pca954x iicbus iicmux
dev/iicbus/nxprtc.c optional nxprtc | pcf8563
dev/iicbus/ofw_iicbus.c optional fdt iicbus
dev/iicbus/ofw_iicbus_if.m optional fdt iicbus
dev/iicbus/pcf8574.c optional pcf8574
dev/iicbus/pcf8591.c optional pcf8591
dev/iicbus/rtc8583.c optional rtc8583
dev/iicbus/rtc/pcf85063.c optional pcf85063 iicbus fdt
dev/iicbus/rtc/rx8803.c optional rx8803 iicbus fdt
dev/iicbus/s35390a.c optional s35390a
dev/iicbus/sy8106a.c optional sy8106a fdt
dev/iicbus/syr827.c optional syr827 fdt
dev/iicbus/gpio/tca64xx.c optional tca64xx fdt gpio
dev/iicbus/pmic/fan53555.c optional fan53555 fdt
dev/igc/if_igc.c optional igc iflib pci
dev/igc/igc_api.c optional igc iflib pci
dev/igc/igc_base.c optional igc iflib pci
dev/igc/igc_i225.c optional igc iflib pci
dev/igc/igc_mac.c optional igc iflib pci
dev/igc/igc_nvm.c optional igc iflib pci
dev/igc/igc_phy.c optional igc iflib pci
dev/igc/igc_txrx.c optional igc iflib pci
dev/intpm/intpm.c optional intpm pci
# XXX Work around clang warning, until maintainer approves fix.
dev/ips/ips.c optional ips \
compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/ips/ips_commands.c optional ips
dev/ips/ips_disk.c optional ips
dev/ips/ips_ioctl.c optional ips
dev/ips/ips_pci.c optional ips pci
dev/ipw/if_ipw.c optional ipw
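# Intel wireless firmware is carried as uuencoded .uu files under
# sys/contrib/dev/ and decoded at build time by ${NORMAL_FW} before being
# wrapped into .fwo objects, following the same fw_stub.awk pattern used
# for the Chelsio firmware above.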
ipwbssfw.c optional ipwbssfw | ipwfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "ipwbssfw.c"
ipw_bss.fwo optional ipwbssfw | ipwfw \
dependency "ipw_bss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ipw_bss.fwo"
ipw_bss.fw optional ipwbssfw | ipwfw \
dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "ipw_bss.fw"
ipwibssfw.c optional ipwibssfw | ipwfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "ipwibssfw.c"
ipw_ibss.fwo optional ipwibssfw | ipwfw \
dependency "ipw_ibss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ipw_ibss.fwo"
ipw_ibss.fw optional ipwibssfw | ipwfw \
dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "ipw_ibss.fw"
ipwmonitorfw.c optional ipwmonitorfw | ipwfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "ipwmonitorfw.c"
ipw_monitor.fwo optional ipwmonitorfw | ipwfw \
dependency "ipw_monitor.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ipw_monitor.fwo"
ipw_monitor.fw optional ipwmonitorfw | ipwfw \
dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "ipw_monitor.fw"
dev/iscsi/icl.c optional iscsi
dev/iscsi/icl_conn_if.m optional cfiscsi | iscsi
dev/iscsi/icl_soft.c optional iscsi
dev/iscsi/icl_soft_proxy.c optional iscsi
dev/iscsi/iscsi.c optional iscsi scbus
dev/ismt/ismt.c optional ismt
dev/isl/isl.c optional isl iicbus
dev/isp/isp.c optional isp
dev/isp/isp_freebsd.c optional isp
dev/isp/isp_library.c optional isp
dev/isp/isp_pci.c optional isp pci
dev/isp/isp_target.c optional isp
dev/ispfw/ispfw.c optional ispfw
dev/iwi/if_iwi.c optional iwi
iwibssfw.c optional iwibssfw | iwifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwibssfw.c"
iwi_bss.fwo optional iwibssfw | iwifw \
dependency "iwi_bss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwi_bss.fwo"
iwi_bss.fw optional iwibssfw | iwifw \
dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwi_bss.fw"
iwiibssfw.c optional iwiibssfw | iwifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi -miwi_ibss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwiibssfw.c"
iwi_ibss.fwo optional iwiibssfw | iwifw \
dependency "iwi_ibss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwi_ibss.fwo"
iwi_ibss.fw optional iwiibssfw | iwifw \
dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwi_ibss.fw"
iwimonitorfw.c optional iwimonitorfw | iwifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwimonitorfw.c"
iwi_monitor.fwo optional iwimonitorfw | iwifw \
dependency "iwi_monitor.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwi_monitor.fwo"
iwi_monitor.fw optional iwimonitorfw | iwifw \
dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwi_monitor.fw"
dev/iwm/if_iwm.c optional iwm
dev/iwm/if_iwm_7000.c optional iwm
dev/iwm/if_iwm_8000.c optional iwm
dev/iwm/if_iwm_9000.c optional iwm
dev/iwm/if_iwm_9260.c optional iwm
dev/iwm/if_iwm_binding.c optional iwm
dev/iwm/if_iwm_fw.c optional iwm
dev/iwm/if_iwm_led.c optional iwm
dev/iwm/if_iwm_mac_ctxt.c optional iwm
dev/iwm/if_iwm_notif_wait.c optional iwm
dev/iwm/if_iwm_pcie_trans.c optional iwm
dev/iwm/if_iwm_phy_ctxt.c optional iwm
dev/iwm/if_iwm_phy_db.c optional iwm
dev/iwm/if_iwm_power.c optional iwm
dev/iwm/if_iwm_scan.c optional iwm
dev/iwm/if_iwm_sf.c optional iwm
dev/iwm/if_iwm_sta.c optional iwm
dev/iwm/if_iwm_time_event.c optional iwm
dev/iwm/if_iwm_util.c optional iwm
iwm3160fw.c optional iwm3160fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3160.fw:iwm3160fw -miwm3160fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm3160fw.c"
iwm3160fw.fwo optional iwm3160fw | iwmfw \
dependency "iwm3160.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm3160fw.fwo"
iwm3160.fw optional iwm3160fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-3160-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm3160.fw"
iwm3168fw.c optional iwm3168fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3168.fw:iwm3168fw -miwm3168fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm3168fw.c"
iwm3168fw.fwo optional iwm3168fw | iwmfw \
dependency "iwm3168.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm3168fw.fwo"
iwm3168.fw optional iwm3168fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-3168-22.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm3168.fw"
iwm7260fw.c optional iwm7260fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7260.fw:iwm7260fw -miwm7260fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm7260fw.c"
iwm7260fw.fwo optional iwm7260fw | iwmfw \
dependency "iwm7260.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm7260fw.fwo"
iwm7260.fw optional iwm7260fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-7260-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm7260.fw"
iwm7265fw.c optional iwm7265fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265.fw:iwm7265fw -miwm7265fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm7265fw.c"
iwm7265fw.fwo optional iwm7265fw | iwmfw \
dependency "iwm7265.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm7265fw.fwo"
iwm7265.fw optional iwm7265fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-7265-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm7265.fw"
iwm7265Dfw.c optional iwm7265Dfw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265D.fw:iwm7265Dfw -miwm7265Dfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm7265Dfw.c"
iwm7265Dfw.fwo optional iwm7265Dfw | iwmfw \
dependency "iwm7265D.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm7265Dfw.fwo"
iwm7265D.fw optional iwm7265Dfw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-7265D-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm7265D.fw"
iwm8000Cfw.c optional iwm8000Cfw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8000C.fw:iwm8000Cfw -miwm8000Cfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm8000Cfw.c"
iwm8000Cfw.fwo optional iwm8000Cfw | iwmfw \
dependency "iwm8000C.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm8000Cfw.fwo"
iwm8000C.fw optional iwm8000Cfw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-8000C-16.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm8000C.fw"
iwm8265.fw optional iwm8265fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-8265-22.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm8265.fw"
iwm8265fw.c optional iwm8265fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8265.fw:iwm8265fw -miwm8265fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm8265fw.c"
iwm8265fw.fwo optional iwm8265fw | iwmfw \
dependency "iwm8265.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm8265fw.fwo"
dev/iwn/if_iwn.c optional iwn
iwn1000fw.c optional iwn1000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn1000fw.c"
iwn1000fw.fwo optional iwn1000fw | iwnfw \
dependency "iwn1000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn1000fw.fwo"
iwn1000.fw optional iwn1000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn1000.fw"
iwn100fw.c optional iwn100fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn100.fw:iwn100fw -miwn100fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn100fw.c"
iwn100fw.fwo optional iwn100fw | iwnfw \
dependency "iwn100.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn100fw.fwo"
iwn100.fw optional iwn100fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-100-39.31.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn100.fw"
iwn105fw.c optional iwn105fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn105.fw:iwn105fw -miwn105fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn105fw.c"
iwn105fw.fwo optional iwn105fw | iwnfw \
dependency "iwn105.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn105fw.fwo"
iwn105.fw optional iwn105fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-105-6-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn105.fw"
iwn135fw.c optional iwn135fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn135.fw:iwn135fw -miwn135fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn135fw.c"
iwn135fw.fwo optional iwn135fw | iwnfw \
dependency "iwn135.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn135fw.fwo"
iwn135.fw optional iwn135fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-135-6-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn135.fw"
iwn2000fw.c optional iwn2000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2000.fw:iwn2000fw -miwn2000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn2000fw.c"
iwn2000fw.fwo optional iwn2000fw | iwnfw \
dependency "iwn2000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn2000fw.fwo"
iwn2000.fw optional iwn2000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-2000-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn2000.fw"
iwn2030fw.c optional iwn2030fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2030.fw:iwn2030fw -miwn2030fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn2030fw.c"
iwn2030fw.fwo optional iwn2030fw | iwnfw \
dependency "iwn2030.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn2030fw.fwo"
iwn2030.fw optional iwn2030fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwnwifi-2030-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn2030.fw"
iwn4965fw.c optional iwn4965fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn4965fw.c"
iwn4965fw.fwo optional iwn4965fw | iwnfw \
dependency "iwn4965.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn4965fw.fwo"
iwn4965.fw optional iwn4965fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn4965.fw"
iwn5000fw.c optional iwn5000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn5000fw.c"
iwn5000fw.fwo optional iwn5000fw | iwnfw \
dependency "iwn5000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn5000fw.fwo"
iwn5000.fw optional iwn5000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn5000.fw"
iwn5150fw.c optional iwn5150fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn5150fw.c"
iwn5150fw.fwo optional iwn5150fw | iwnfw \
dependency "iwn5150.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn5150fw.fwo"
iwn5150.fw optional iwn5150fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu"\
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn5150.fw"
iwn6000fw.c optional iwn6000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6000fw.c"
iwn6000fw.fwo optional iwn6000fw | iwnfw \
dependency "iwn6000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6000fw.fwo"
iwn6000.fw optional iwn6000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6000.fw"
iwn6000g2afw.c optional iwn6000g2afw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw -miwn6000g2afw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6000g2afw.c"
iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \
dependency "iwn6000g2a.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6000g2afw.fwo"
iwn6000g2a.fw optional iwn6000g2afw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6000g2a.fw"
iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6000g2bfw.c"
iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \
dependency "iwn6000g2b.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6000g2bfw.fwo"
iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6000g2b.fw"
iwn6050fw.c optional iwn6050fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6050fw.c"
iwn6050fw.fwo optional iwn6050fw | iwnfw \
dependency "iwn6050.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6050fw.fwo"
iwn6050.fw optional iwn6050fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6050.fw"
dev/ixgbe/if_ix.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/if_ixv.c optional ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/if_bypass.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_fdir.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_sriov.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_osdep.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_phy.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_api.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_common.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_mbx.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_vf.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82598.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82599.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82599.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/jedec_dimm/jedec_dimm.c optional jedec_dimm smbus
dev/jme/if_jme.c optional jme pci
dev/kbd/kbd.c optional atkbd | pckbd | sc | ukbd | vt | hkbd
dev/kbdmux/kbdmux.c optional kbdmux
dev/ksyms/ksyms.c optional ksyms
dev/le/am7990.c optional le
dev/le/am79900.c optional le
dev/le/if_le_pci.c optional le pci
dev/le/lance.c optional le
dev/led/led.c standard
dev/lge/if_lge.c optional lge
dev/liquidio/base/cn23xx_pf_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_console.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_ctrl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_droq.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_mem_ops.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_request_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_response_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_core.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_ioctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_main.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rss.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rxtx.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_sysctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
lio.c optional lio \
compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "lio.c"
lio_23xx_nic.bin.fw.fwo optional lio \
dependency "lio_23xx_nic.bin.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "lio_23xx_nic.bin.fw.fwo"
lio_23xx_nic.bin.fw optional lio \
dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "lio_23xx_nic.bin.fw"
dev/malo/if_malo.c optional malo
dev/malo/if_malohal.c optional malo
dev/malo/if_malo_pci.c optional malo pci
dev/md/md.c optional md
dev/mdio/mdio_if.m optional miiproxy | mdio
dev/mdio/mdio.c optional miiproxy | mdio
dev/mem/memdev.c optional mem
dev/mem/memutil.c optional mem
dev/mfi/mfi.c optional mfi
dev/mfi/mfi_debug.c optional mfi
dev/mfi/mfi_pci.c optional mfi pci
dev/mfi/mfi_disk.c optional mfi
dev/mfi/mfi_syspd.c optional mfi
dev/mfi/mfi_tbolt.c optional mfi
dev/mfi/mfi_cam.c optional mfip scbus
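# Note: each PHY driver below is pulled in either by the umbrella miibus
# option or individually by its own device option (optional miibus | <phy>).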
dev/mii/acphy.c optional miibus | acphy
dev/mii/amphy.c optional miibus | amphy
dev/mii/atphy.c optional miibus | atphy
dev/mii/axphy.c optional miibus | axphy
dev/mii/bmtphy.c optional miibus | bmtphy
dev/mii/brgphy.c optional miibus | brgphy
dev/mii/ciphy.c optional miibus | ciphy
dev/mii/dp83822phy.c optional miibus | dp83822phy
dev/mii/dp83867phy.c optional miibus | dp83867phy
dev/mii/e1000phy.c optional miibus | e1000phy
dev/mii/gentbi.c optional miibus | gentbi
dev/mii/icsphy.c optional miibus | icsphy
dev/mii/ip1000phy.c optional miibus | ip1000phy
dev/mii/jmphy.c optional miibus | jmphy
dev/mii/lxtphy.c optional miibus | lxtphy
dev/mii/micphy.c optional miibus fdt | micphy fdt
dev/mii/mii.c optional miibus | mii
dev/mii/mii_bitbang.c optional miibus | mii_bitbang
dev/mii/mii_physubr.c optional miibus | mii
dev/mii/mii_fdt.c optional miibus fdt | mii fdt
dev/mii/miibus_if.m optional miibus | mii
dev/mii/mv88e151x.c optional miibus | mv88e151x
dev/mii/nsgphy.c optional miibus | nsgphy
dev/mii/nsphy.c optional miibus | nsphy
dev/mii/nsphyter.c optional miibus | nsphyter
dev/mii/pnaphy.c optional miibus | pnaphy
dev/mii/qsphy.c optional miibus | qsphy
dev/mii/rdcphy.c optional miibus | rdcphy
dev/mii/rgephy.c optional miibus | rgephy
dev/mii/rlphy.c optional miibus | rlphy
dev/mii/rlswitch.c optional rlswitch
dev/mii/smcphy.c optional miibus | smcphy
dev/mii/smscphy.c optional miibus | smscphy
dev/mii/tdkphy.c optional miibus | tdkphy
dev/mii/truephy.c optional miibus | truephy
dev/mii/ukphy.c optional miibus | mii
dev/mii/ukphy_subr.c optional miibus | mii
dev/mii/vscphy.c optional miibus | vscphy
dev/mii/xmphy.c optional miibus | xmphy
dev/mlxfw/mlxfw_fsm.c optional mlxfw \
compile-with "${MLXFW_C}"
dev/mlxfw/mlxfw_mfa2.c optional mlxfw \
compile-with "${MLXFW_C}"
dev/mlxfw/mlxfw_mfa2_tlv_multi.c optional mlxfw \
compile-with "${MLXFW_C}"
dev/mlx/mlx.c optional mlx
dev/mlx/mlx_disk.c optional mlx
dev/mlx/mlx_pci.c optional mlx pci
dev/mmc/mmc_subr.c optional mmc | mmcsd !mmccam
dev/mmc/mmc.c optional mmc !mmccam
dev/mmc/mmcbr_if.m standard
dev/mmc/mmcbus_if.m standard
dev/mmc/mmcsd.c optional mmcsd !mmccam
dev/mmc/mmc_fdt_helpers.c optional mmc regulator clk fdt | mmccam regulator clk fdt
dev/mmc/mmc_helpers.c optional mmc gpio regulator clk | mmccam gpio regulator clk
dev/mmc/mmc_pwrseq.c optional mmc clk regulator fdt | mmccam clk regulator fdt
dev/mmc/mmc_pwrseq_if.m optional mmc clk regulator fdt | mmccam clk regulator fdt
dev/mmcnull/mmcnull.c optional mmcnull
dev/mpr/mpr.c optional mpr
dev/mpr/mpr_config.c optional mpr
# XXX Work around clang warning, until maintainer approves fix.
dev/mpr/mpr_mapping.c optional mpr \
compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/mpr/mpr_pci.c optional mpr pci
dev/mpr/mpr_sas.c optional mpr \
compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}"
dev/mpr/mpr_sas_lsi.c optional mpr
dev/mpr/mpr_table.c optional mpr
dev/mpr/mpr_user.c optional mpr
dev/mps/mps.c optional mps
dev/mps/mps_config.c optional mps
# XXX Work around clang warning, until maintainer approves fix.
dev/mps/mps_mapping.c optional mps \
compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/mps/mps_pci.c optional mps pci
dev/mps/mps_sas.c optional mps \
compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}"
dev/mps/mps_sas_lsi.c optional mps
dev/mps/mps_table.c optional mps
dev/mps/mps_user.c optional mps
dev/mpt/mpt.c optional mpt
dev/mpt/mpt_cam.c optional mpt
dev/mpt/mpt_debug.c optional mpt
dev/mpt/mpt_pci.c optional mpt pci
dev/mpt/mpt_raid.c optional mpt
dev/mpt/mpt_user.c optional mpt
dev/mrsas/mrsas.c optional mrsas
dev/mrsas/mrsas_cam.c optional mrsas
dev/mrsas/mrsas_ioctl.c optional mrsas
dev/mrsas/mrsas_fp.c optional mrsas
dev/msk/if_msk.c optional msk
dev/mvs/mvs.c optional mvs
dev/mvs/mvs_if.m optional mvs
dev/mvs/mvs_pci.c optional mvs pci
dev/mwl/if_mwl.c optional mwl
dev/mwl/if_mwl_pci.c optional mwl pci
dev/mwl/mwlhal.c optional mwl
mwlfw.c optional mwlfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "mwlfw.c"
mw88W8363.fwo optional mwlfw \
dependency "mw88W8363.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "mw88W8363.fwo"
mw88W8363.fw optional mwlfw \
dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "mw88W8363.fw"
mwlboot.fwo optional mwlfw \
dependency "mwlboot.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "mwlboot.fwo"
mwlboot.fw optional mwlfw \
dependency "$S/contrib/dev/mwl/mwlboot.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "mwlboot.fw"
dev/mxge/if_mxge.c optional mxge pci
dev/mxge/mxge_eth_z8e.c optional mxge pci
dev/mxge/mxge_ethp_z8e.c optional mxge pci
dev/mxge/mxge_rss_eth_z8e.c optional mxge pci
dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci
dev/my/if_my.c optional my
dev/netmap/if_ptnet.c optional netmap inet
dev/netmap/netmap.c optional netmap
dev/netmap/netmap_bdg.c optional netmap
dev/netmap/netmap_freebsd.c optional netmap
dev/netmap/netmap_generic.c optional netmap
dev/netmap/netmap_kloop.c optional netmap
dev/netmap/netmap_legacy.c optional netmap
dev/netmap/netmap_mbq.c optional netmap
dev/netmap/netmap_mem2.c optional netmap
dev/netmap/netmap_monitor.c optional netmap
dev/netmap/netmap_null.c optional netmap
dev/netmap/netmap_offloadings.c optional netmap
dev/netmap/netmap_pipe.c optional netmap
dev/netmap/netmap_vale.c optional netmap
# compile-with "${NORMAL_C} -Wconversion -Wextra"
dev/nfsmb/nfsmb.c optional nfsmb pci
dev/nge/if_nge.c optional nge
dev/nmdm/nmdm.c optional nmdm
dev/null/null.c standard
dev/nvd/nvd.c optional nvd nvme
dev/nvme/nvme.c optional nvme
dev/nvme/nvme_ahci.c optional nvme ahci
dev/nvme/nvme_ctrlr.c optional nvme
dev/nvme/nvme_ctrlr_cmd.c optional nvme
dev/nvme/nvme_ns.c optional nvme
dev/nvme/nvme_ns_cmd.c optional nvme
dev/nvme/nvme_pci.c optional nvme pci
dev/nvme/nvme_qpair.c optional nvme
dev/nvme/nvme_sim.c optional nvme scbus
dev/nvme/nvme_sysctl.c optional nvme
dev/nvme/nvme_test.c optional nvme
dev/nvme/nvme_util.c optional nvme
dev/oce/oce_hw.c optional oce pci
dev/oce/oce_if.c optional oce pci
dev/oce/oce_mbox.c optional oce pci
dev/oce/oce_queue.c optional oce pci
dev/oce/oce_sysctl.c optional oce pci
dev/oce/oce_util.c optional oce pci
dev/ocs_fc/ocs_gendump.c optional ocs_fc pci
dev/ocs_fc/ocs_pci.c optional ocs_fc pci
dev/ocs_fc/ocs_ioctl.c optional ocs_fc pci
dev/ocs_fc/ocs_os.c optional ocs_fc pci
dev/ocs_fc/ocs_utils.c optional ocs_fc pci
dev/ocs_fc/ocs_hw.c optional ocs_fc pci
dev/ocs_fc/ocs_hw_queues.c optional ocs_fc pci
dev/ocs_fc/sli4.c optional ocs_fc pci
dev/ocs_fc/ocs_sm.c optional ocs_fc pci
dev/ocs_fc/ocs_device.c optional ocs_fc pci
dev/ocs_fc/ocs_xport.c optional ocs_fc pci
dev/ocs_fc/ocs_domain.c optional ocs_fc pci
dev/ocs_fc/ocs_sport.c optional ocs_fc pci
dev/ocs_fc/ocs_els.c optional ocs_fc pci
dev/ocs_fc/ocs_fabric.c optional ocs_fc pci
dev/ocs_fc/ocs_io.c optional ocs_fc pci
dev/ocs_fc/ocs_node.c optional ocs_fc pci
dev/ocs_fc/ocs_scsi.c optional ocs_fc pci
dev/ocs_fc/ocs_unsol.c optional ocs_fc pci
dev/ocs_fc/ocs_ddump.c optional ocs_fc pci
dev/ocs_fc/ocs_mgmt.c optional ocs_fc pci
dev/ocs_fc/ocs_cam.c optional ocs_fc pci
dev/ofw/ofw_bus_if.m optional fdt
dev/ofw/ofw_bus_subr.c optional fdt
dev/ofw/ofw_cpu.c optional fdt
dev/ofw/ofw_fdt.c optional fdt
dev/ofw/ofw_if.m optional fdt
dev/ofw/ofw_graph.c optional fdt
dev/ofw/ofw_subr.c optional fdt
dev/ofw/ofwbus.c optional fdt
dev/ofw/openfirm.c optional fdt
dev/ofw/openfirmio.c optional fdt
dev/ow/ow.c optional ow \
dependency "owll_if.h" \
dependency "own_if.h"
dev/ow/owll_if.m optional ow
dev/ow/own_if.m optional ow
dev/ow/ow_temp.c optional ow_temp
dev/ow/owc_gpiobus.c optional owc gpio
dev/pbio/pbio.c optional pbio isa
dev/pccbb/pccbb.c optional cbb
dev/pccbb/pccbb_pci.c optional cbb pci
dev/pcf/pcf.c optional pcf
dev/pci/fixup_pci.c optional pci
dev/pci/hostb_pci.c optional pci
dev/pci/ignore_pci.c optional pci
dev/pci/isa_pci.c optional pci isa
dev/pci/pci.c optional pci
dev/pci/pci_if.m standard
dev/pci/pci_iov.c optional pci pci_iov
dev/pci/pci_iov_if.m standard
dev/pci/pci_iov_schema.c optional pci pci_iov
dev/pci/pci_pci.c optional pci
dev/pci/pci_subr.c optional pci
dev/pci/pci_user.c optional pci
dev/pci/pcib_if.m standard
dev/pci/pcib_support.c standard
dev/pci/vga_pci.c optional pci
dev/pms/freebsd/driver/ini/src/agtiapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sadisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/mpi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saframe.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sahw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sainit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saint.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sampicmd.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sampirsp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saphy.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saport.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sasata.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sasmp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sassp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/satimer.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sautil.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saioctlcmd.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/mpidebug.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dminit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmsmp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmdisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmport.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmtimer.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmmisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/sminit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smmisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smsat.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smsatcb.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smsathw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smtimer.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdinit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdmisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdesgl.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdport.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdint.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdioctl.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdhw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tddmcmnapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdsmcmnapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdtimers.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itdio.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itdcb.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itdinit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sata/host/sat.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sata/host/ossasat.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sata/host/sathw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/ppbus/if_plip.c optional plip
dev/ppbus/lpbb.c optional lpbb
dev/ppbus/lpt.c optional lpt
dev/ppbus/pcfclock.c optional pcfclock
dev/ppbus/ppb_1284.c optional ppbus
dev/ppbus/ppb_base.c optional ppbus
dev/ppbus/ppb_msq.c optional ppbus
dev/ppbus/ppbconf.c optional ppbus
dev/ppbus/ppbus_if.m optional ppbus
dev/ppbus/ppi.c optional ppi
dev/ppbus/pps.c optional pps
dev/ppc/ppc.c optional ppc
dev/ppc/ppc_acpi.c optional ppc acpi
dev/ppc/ppc_isa.c optional ppc isa
dev/ppc/ppc_pci.c optional ppc pci
dev/ppc/ppc_puc.c optional ppc puc
dev/proto/proto_bus_isa.c optional proto acpi | proto isa
dev/proto/proto_bus_pci.c optional proto pci
dev/proto/proto_busdma.c optional proto
dev/proto/proto_core.c optional proto
dev/pst/pst-iop.c optional pst
dev/pst/pst-pci.c optional pst pci
dev/pst/pst-raid.c optional pst
dev/pty/pty.c optional pty
dev/puc/puc.c optional puc
dev/puc/puc_cfg.c optional puc
dev/puc/puc_pci.c optional puc pci
dev/pwm/pwmc.c optional pwm | pwmc
dev/pwm/pwmbus.c optional pwm | pwmbus
dev/pwm/pwmbus_if.m optional pwm | pwmbus
dev/pwm/ofw_pwm.c optional pwm fdt | pwmbus fdt
dev/pwm/ofw_pwmbus.c optional pwm fdt | pwmbus fdt
dev/pwm/pwm_backlight.c optional pwm pwm_backlight fdt
dev/quicc/quicc_core.c optional quicc
dev/ral/rt2560.c optional ral
dev/ral/rt2661.c optional ral
dev/ral/rt2860.c optional ral
dev/ral/if_ral_pci.c optional ral pci
rt2561fw.c optional rt2561fw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2561fw.c"
rt2561fw.fwo optional rt2561fw | ralfw \
dependency "rt2561.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2561fw.fwo"
rt2561.fw optional rt2561fw | ralfw \
dependency "$S/contrib/dev/ral/rt2561.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2561.fw"
rt2561sfw.c optional rt2561sfw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2561sfw.c"
rt2561sfw.fwo optional rt2561sfw | ralfw \
dependency "rt2561s.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2561sfw.fwo"
rt2561s.fw optional rt2561sfw | ralfw \
dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2561s.fw"
rt2661fw.c optional rt2661fw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2661fw.c"
rt2661fw.fwo optional rt2661fw | ralfw \
dependency "rt2661.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2661fw.fwo"
rt2661.fw optional rt2661fw | ralfw \
dependency "$S/contrib/dev/ral/rt2661.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2661.fw"
rt2860fw.c optional rt2860fw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2860fw.c"
rt2860fw.fwo optional rt2860fw | ralfw \
dependency "rt2860.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2860fw.fwo"
rt2860.fw optional rt2860fw | ralfw \
dependency "$S/contrib/dev/ral/rt2860.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2860.fw"
dev/random/random_infra.c standard
dev/random/random_harvestq.c standard
dev/random/randomdev.c optional !random_loadable
dev/random/fenestrasX/fx_brng.c optional !random_loadable random_fenestrasx
dev/random/fenestrasX/fx_main.c optional !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2"
dev/random/fenestrasX/fx_pool.c optional !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2"
dev/random/fenestrasX/fx_rng.c optional !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2"
dev/random/fortuna.c optional !random_loadable !random_fenestrasx
dev/random/hash.c optional !random_loadable
dev/rccgpio/rccgpio.c optional rccgpio gpio
dev/re/if_re.c optional re
dev/rl/if_rl.c optional rl pci
dev/rndtest/rndtest.c optional rndtest
#
dev/rtsx/rtsx.c optional rtsx pci
#
dev/rtwn/if_rtwn.c optional rtwn
dev/rtwn/if_rtwn_beacon.c optional rtwn
dev/rtwn/if_rtwn_calib.c optional rtwn
dev/rtwn/if_rtwn_cam.c optional rtwn
dev/rtwn/if_rtwn_efuse.c optional rtwn
dev/rtwn/if_rtwn_fw.c optional rtwn
dev/rtwn/if_rtwn_rx.c optional rtwn
dev/rtwn/if_rtwn_task.c optional rtwn
dev/rtwn/if_rtwn_tx.c optional rtwn
#
dev/rtwn/pci/rtwn_pci_attach.c optional rtwn_pci pci
dev/rtwn/pci/rtwn_pci_reg.c optional rtwn_pci pci
dev/rtwn/pci/rtwn_pci_rx.c optional rtwn_pci pci
dev/rtwn/pci/rtwn_pci_tx.c optional rtwn_pci pci
#
dev/rtwn/usb/rtwn_usb_attach.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_ep.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_reg.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_rx.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_tx.c optional rtwn_usb
# RTL8188E
dev/rtwn/rtl8188e/r88e_beacon.c optional rtwn
dev/rtwn/rtl8188e/r88e_calib.c optional rtwn
dev/rtwn/rtl8188e/r88e_chan.c optional rtwn
dev/rtwn/rtl8188e/r88e_fw.c optional rtwn
dev/rtwn/rtl8188e/r88e_init.c optional rtwn
dev/rtwn/rtl8188e/r88e_led.c optional rtwn
dev/rtwn/rtl8188e/r88e_tx.c optional rtwn
dev/rtwn/rtl8188e/r88e_rf.c optional rtwn
dev/rtwn/rtl8188e/r88e_rom.c optional rtwn
dev/rtwn/rtl8188e/r88e_rx.c optional rtwn
dev/rtwn/rtl8188e/pci/r88ee_attach.c optional rtwn_pci pci
dev/rtwn/rtl8188e/pci/r88ee_init.c optional rtwn_pci pci
dev/rtwn/rtl8188e/pci/r88ee_rx.c optional rtwn_pci pci
dev/rtwn/rtl8188e/usb/r88eu_attach.c optional rtwn_usb
dev/rtwn/rtl8188e/usb/r88eu_init.c optional rtwn_usb
# RTL8192C
dev/rtwn/rtl8192c/r92c_attach.c optional rtwn
dev/rtwn/rtl8192c/r92c_beacon.c optional rtwn
dev/rtwn/rtl8192c/r92c_calib.c optional rtwn
dev/rtwn/rtl8192c/r92c_chan.c optional rtwn
dev/rtwn/rtl8192c/r92c_fw.c optional rtwn
dev/rtwn/rtl8192c/r92c_init.c optional rtwn
dev/rtwn/rtl8192c/r92c_llt.c optional rtwn
dev/rtwn/rtl8192c/r92c_rf.c optional rtwn
dev/rtwn/rtl8192c/r92c_rom.c optional rtwn
dev/rtwn/rtl8192c/r92c_rx.c optional rtwn
dev/rtwn/rtl8192c/r92c_tx.c optional rtwn
dev/rtwn/rtl8192c/pci/r92ce_attach.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_calib.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_fw.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_init.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_led.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_rx.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_tx.c optional rtwn_pci pci
dev/rtwn/rtl8192c/usb/r92cu_attach.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_init.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_led.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_rx.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_tx.c optional rtwn_usb
# RTL8192E
dev/rtwn/rtl8192e/r92e_chan.c optional rtwn
dev/rtwn/rtl8192e/r92e_fw.c optional rtwn
dev/rtwn/rtl8192e/r92e_init.c optional rtwn
dev/rtwn/rtl8192e/r92e_led.c optional rtwn
dev/rtwn/rtl8192e/r92e_rf.c optional rtwn
dev/rtwn/rtl8192e/r92e_rom.c optional rtwn
dev/rtwn/rtl8192e/r92e_rx.c optional rtwn
dev/rtwn/rtl8192e/usb/r92eu_attach.c optional rtwn_usb
dev/rtwn/rtl8192e/usb/r92eu_init.c optional rtwn_usb
# RTL8812A
dev/rtwn/rtl8812a/r12a_beacon.c optional rtwn
dev/rtwn/rtl8812a/r12a_calib.c optional rtwn
dev/rtwn/rtl8812a/r12a_caps.c optional rtwn
dev/rtwn/rtl8812a/r12a_chan.c optional rtwn
dev/rtwn/rtl8812a/r12a_fw.c optional rtwn
dev/rtwn/rtl8812a/r12a_init.c optional rtwn
dev/rtwn/rtl8812a/r12a_led.c optional rtwn
dev/rtwn/rtl8812a/r12a_rf.c optional rtwn
dev/rtwn/rtl8812a/r12a_rom.c optional rtwn
dev/rtwn/rtl8812a/r12a_rx.c optional rtwn
dev/rtwn/rtl8812a/r12a_tx.c optional rtwn
dev/rtwn/rtl8812a/usb/r12au_attach.c optional rtwn_usb
dev/rtwn/rtl8812a/usb/r12au_init.c optional rtwn_usb
dev/rtwn/rtl8812a/usb/r12au_rx.c optional rtwn_usb
dev/rtwn/rtl8812a/usb/r12au_tx.c optional rtwn_usb
# RTL8821A
dev/rtwn/rtl8821a/r21a_beacon.c optional rtwn
dev/rtwn/rtl8821a/r21a_calib.c optional rtwn
dev/rtwn/rtl8821a/r21a_chan.c optional rtwn
dev/rtwn/rtl8821a/r21a_fw.c optional rtwn
dev/rtwn/rtl8821a/r21a_init.c optional rtwn
dev/rtwn/rtl8821a/r21a_led.c optional rtwn
dev/rtwn/rtl8821a/r21a_rom.c optional rtwn
dev/rtwn/rtl8821a/r21a_rx.c optional rtwn
dev/rtwn/rtl8821a/usb/r21au_attach.c optional rtwn_usb
dev/rtwn/rtl8821a/usb/r21au_dfs.c optional rtwn_usb
dev/rtwn/rtl8821a/usb/r21au_init.c optional rtwn_usb
rtwn-rtl8188eefw.c optional rtwn-rtl8188eefw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eefw.fw:rtwn-rtl8188eefw:111 -mrtwn-rtl8188eefw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8188eefw.c"
rtwn-rtl8188eefw.fwo optional rtwn-rtl8188eefw | rtwnfw \
dependency "rtwn-rtl8188eefw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8188eefw.fwo"
rtwn-rtl8188eefw.fw optional rtwn-rtl8188eefw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eefw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8188eefw.fw"
rtwn-rtl8188eufw.c optional rtwn-rtl8188eufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eufw.fw:rtwn-rtl8188eufw:111 -mrtwn-rtl8188eufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8188eufw.c"
rtwn-rtl8188eufw.fwo optional rtwn-rtl8188eufw | rtwnfw \
dependency "rtwn-rtl8188eufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8188eufw.fwo"
rtwn-rtl8188eufw.fw optional rtwn-rtl8188eufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8188eufw.fw"
rtwn-rtl8192cfwE.c optional rtwn-rtl8192cfwE | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE.fw:rtwn-rtl8192cfwE:111 -mrtwn-rtl8192cfwE -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwE.c"
rtwn-rtl8192cfwE.fwo optional rtwn-rtl8192cfwE | rtwnfw \
dependency "rtwn-rtl8192cfwE.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwE.fwo"
rtwn-rtl8192cfwE.fw optional rtwn-rtl8192cfwE | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwE.fw"
rtwn-rtl8192cfwE_B.c optional rtwn-rtl8192cfwE_B | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE_B.fw:rtwn-rtl8192cfwE_B:111 -mrtwn-rtl8192cfwE_B -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwE_B.c"
rtwn-rtl8192cfwE_B.fwo optional rtwn-rtl8192cfwE_B | rtwnfw \
dependency "rtwn-rtl8192cfwE_B.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwE_B.fwo"
rtwn-rtl8192cfwE_B.fw optional rtwn-rtl8192cfwE_B | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE_B.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwE_B.fw"
rtwn-rtl8192cfwT.c optional rtwn-rtl8192cfwT | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwT.fw:rtwn-rtl8192cfwT:111 -mrtwn-rtl8192cfwT -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwT.c"
rtwn-rtl8192cfwT.fwo optional rtwn-rtl8192cfwT | rtwnfw \
dependency "rtwn-rtl8192cfwT.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwT.fwo"
rtwn-rtl8192cfwT.fw optional rtwn-rtl8192cfwT | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwT.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwT.fw"
rtwn-rtl8192cfwU.c optional rtwn-rtl8192cfwU | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwU.fw:rtwn-rtl8192cfwU:111 -mrtwn-rtl8192cfwU -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwU.c"
rtwn-rtl8192cfwU.fwo optional rtwn-rtl8192cfwU | rtwnfw \
dependency "rtwn-rtl8192cfwU.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwU.fwo"
rtwn-rtl8192cfwU.fw optional rtwn-rtl8192cfwU | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwU.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwU.fw"
rtwn-rtl8192eufw.c optional rtwn-rtl8192eufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192eufw.fw:rtwn-rtl8192eufw:111 -mrtwn-rtl8192eufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192eufw.c"
rtwn-rtl8192eufw.fwo optional rtwn-rtl8192eufw | rtwnfw \
dependency "rtwn-rtl8192eufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192eufw.fwo"
rtwn-rtl8192eufw.fw optional rtwn-rtl8192eufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192eufw.fw"
rtwn-rtl8812aufw.c optional rtwn-rtl8812aufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8812aufw.fw:rtwn-rtl8812aufw:111 -mrtwn-rtl8812aufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8812aufw.c"
rtwn-rtl8812aufw.fwo optional rtwn-rtl8812aufw | rtwnfw \
dependency "rtwn-rtl8812aufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8812aufw.fwo"
rtwn-rtl8812aufw.fw optional rtwn-rtl8812aufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8812aufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8812aufw.fw"
rtwn-rtl8821aufw.c optional rtwn-rtl8821aufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8821aufw.fw:rtwn-rtl8821aufw:111 -mrtwn-rtl8821aufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8821aufw.c"
rtwn-rtl8821aufw.fwo optional rtwn-rtl8821aufw | rtwnfw \
dependency "rtwn-rtl8821aufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8821aufw.fwo"
rtwn-rtl8821aufw.fw optional rtwn-rtl8821aufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8821aufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8821aufw.fw"
dev/safe/safe.c optional safe
dev/scc/scc_if.m optional scc
dev/scc/scc_bfe_quicc.c optional scc quicc
dev/scc/scc_core.c optional scc
dev/scc/scc_dev_quicc.c optional scc quicc
dev/scc/scc_dev_z8530.c optional scc
dev/sdhci/sdhci.c optional sdhci
dev/sdhci/sdhci_fdt.c optional sdhci fdt regulator clk
dev/sdhci/sdhci_fdt_gpio.c optional sdhci fdt gpio
dev/sdhci/sdhci_fsl_fdt.c optional sdhci fdt gpio regulator clk
dev/sdhci/sdhci_if.m optional sdhci
dev/sdhci/sdhci_acpi.c optional sdhci acpi
dev/sdhci/sdhci_pci.c optional sdhci pci
dev/sdio/sdio_if.m optional mmccam
dev/sdio/sdio_subr.c optional mmccam
dev/sdio/sdiob.c optional mmccam
dev/sge/if_sge.c optional sge pci
dev/siis/siis.c optional siis pci
dev/sis/if_sis.c optional sis pci
dev/sk/if_sk.c optional sk pci
dev/smbios/smbios.c optional smbios
dev/smbus/smb.c optional smb
dev/smbus/smbconf.c optional smbus
dev/smbus/smbus.c optional smbus
dev/smbus/smbus_if.m optional smbus
dev/smc/if_smc.c optional smc
dev/smc/if_smc_acpi.c optional smc acpi
dev/smc/if_smc_fdt.c optional smc fdt
dev/snp/snp.c optional snp
dev/sound/clone.c optional sound
dev/sound/unit.c optional sound
dev/sound/pci/als4000.c optional snd_als4000 pci
dev/sound/pci/atiixp.c optional snd_atiixp pci
dev/sound/pci/cmi.c optional snd_cmi pci
dev/sound/pci/cs4281.c optional snd_cs4281 pci
dev/sound/pci/csa.c optional snd_csa pci
dev/sound/pci/csapcm.c optional snd_csa pci
dev/sound/pci/emu10k1.c optional snd_emu10k1 pci
dev/sound/pci/emu10kx.c optional snd_emu10kx pci
dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci
dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci
dev/sound/pci/envy24.c optional snd_envy24 pci
dev/sound/pci/envy24ht.c optional snd_envy24ht pci
dev/sound/pci/es137x.c optional snd_es137x pci
dev/sound/pci/fm801.c optional snd_fm801 pci
dev/sound/pci/ich.c optional snd_ich pci
dev/sound/pci/maestro3.c optional snd_maestro3 pci
dev/sound/pci/neomagic.c optional snd_neomagic pci
dev/sound/pci/solo.c optional snd_solo pci
dev/sound/pci/spicds.c optional snd_spicds pci
dev/sound/pci/t4dwave.c optional snd_t4dwave pci
dev/sound/pci/via8233.c optional snd_via8233 pci
dev/sound/pci/via82c686.c optional snd_via82c686 pci
dev/sound/pci/vibes.c optional snd_vibes pci
dev/sound/pci/hda/hdaa.c optional snd_hda pci
dev/sound/pci/hda/hdaa_patches.c optional snd_hda pci
dev/sound/pci/hda/hdac.c optional snd_hda pci
dev/sound/pci/hda/hdac_if.m optional snd_hda pci
dev/sound/pci/hda/hdacc.c optional snd_hda pci
dev/sound/pci/hdspe.c optional snd_hdspe pci
dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci
dev/sound/pcm/ac97.c optional sound
dev/sound/pcm/ac97_if.m optional sound
dev/sound/pcm/ac97_patch.c optional sound
dev/sound/pcm/buffer.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/channel.c optional sound
dev/sound/pcm/channel_if.m optional sound
dev/sound/pcm/dsp.c optional sound
dev/sound/pcm/feeder.c optional sound
dev/sound/pcm/feeder_chain.c optional sound
dev/sound/pcm/feeder_eq.c optional sound \
dependency "feeder_eq_gen.h" \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_if.m optional sound
dev/sound/pcm/feeder_format.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_matrix.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_mixer.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_rate.c optional sound \
dependency "feeder_rate_gen.h" \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_volume.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/mixer.c optional sound
dev/sound/pcm/mixer_if.m optional sound
dev/sound/pcm/sndstat.c optional sound
dev/sound/pcm/sound.c optional sound
dev/sound/pcm/vchan.c optional sound
dev/sound/usb/uaudio.c optional snd_uaudio usb
dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb
dev/sound/midi/midi.c optional sound
dev/sound/midi/mpu401.c optional sound
dev/sound/midi/mpu_if.m optional sound
dev/sound/midi/mpufoi_if.m optional sound
dev/sound/midi/sequencer.c optional sound
dev/sound/midi/synth_if.m optional sound
dev/spibus/ofw_spibus.c optional fdt spibus
dev/spibus/spibus.c optional spibus \
dependency "spibus_if.h"
dev/spibus/spigen.c optional spigen
dev/spibus/spibus_if.m optional spibus
dev/ste/if_ste.c optional ste pci
dev/stge/if_stge.c optional stge
dev/sym/sym_hipd.c optional sym \
dependency "$S/dev/sym/sym_{conf,defs}.h"
dev/syscons/blank/blank_saver.c optional blank_saver
dev/syscons/daemon/daemon_saver.c optional daemon_saver
dev/syscons/dragon/dragon_saver.c optional dragon_saver
dev/syscons/fade/fade_saver.c optional fade_saver
dev/syscons/fire/fire_saver.c optional fire_saver
dev/syscons/green/green_saver.c optional green_saver
dev/syscons/logo/logo.c optional logo_saver
dev/syscons/logo/logo_saver.c optional logo_saver
dev/syscons/rain/rain_saver.c optional rain_saver
dev/syscons/schistory.c optional sc
dev/syscons/scmouse.c optional sc
dev/syscons/scterm.c optional sc
dev/syscons/scterm-dumb.c optional sc !SC_NO_TERM_DUMB
dev/syscons/scterm-sc.c optional sc !SC_NO_TERM_SC
dev/syscons/scterm-teken.c optional sc !SC_NO_TERM_TEKEN
dev/syscons/scvidctl.c optional sc
dev/syscons/scvtb.c optional sc
dev/syscons/snake/snake_saver.c optional snake_saver
dev/syscons/star/star_saver.c optional star_saver
dev/syscons/syscons.c optional sc
dev/syscons/sysmouse.c optional sc
dev/syscons/warp/warp_saver.c optional warp_saver
dev/tcp_log/tcp_log_dev.c optional tcp_blackbox inet | tcp_blackbox inet6
dev/tdfx/tdfx_pci.c optional tdfx pci
dev/ti/if_ti.c optional ti pci
dev/twe/twe.c optional twe
dev/twe/twe_freebsd.c optional twe
dev/tws/tws.c optional tws
dev/tws/tws_cam.c optional tws
dev/tws/tws_hdm.c optional tws
dev/tws/tws_services.c optional tws
dev/tws/tws_user.c optional tws
dev/uart/uart_bus_acpi.c optional uart acpi
dev/uart/uart_bus_fdt.c optional uart fdt
dev/uart/uart_bus_isa.c optional uart isa
dev/uart/uart_bus_pci.c optional uart pci
dev/uart/uart_bus_puc.c optional uart puc
dev/uart/uart_bus_scc.c optional uart scc
dev/uart/uart_core.c optional uart
dev/uart/uart_cpu_acpi.c optional uart acpi
dev/uart/uart_dbg.c optional uart gdb
dev/uart/uart_dev_imx.c optional uart uart_imx fdt
dev/uart/uart_dev_msm.c optional uart uart_msm fdt
dev/uart/uart_dev_mvebu.c optional uart uart_mvebu
dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 | uart uart_snps
dev/uart/uart_dev_pl011.c optional uart pl011
dev/uart/uart_dev_quicc.c optional uart quicc
dev/uart/uart_dev_snps.c optional uart uart_snps fdt
dev/uart/uart_dev_z8530.c optional uart uart_z8530 | uart scc
dev/uart/uart_if.m optional uart
dev/uart/uart_subr.c optional uart
dev/uart/uart_tty.c optional uart
#
# USB controller drivers
#
dev/usb/controller/musb_otg.c optional musb
dev/usb/controller/dwc_otg.c optional dwcotg
dev/usb/controller/dwc_otg_fdt.c optional dwcotg fdt
dev/usb/controller/dwc_otg_acpi.c optional dwcotg acpi
dev/usb/controller/ehci.c optional ehci
dev/usb/controller/ehci_msm.c optional ehci_msm fdt
dev/usb/controller/ehci_pci.c optional ehci pci
dev/usb/controller/ohci.c optional ohci
dev/usb/controller/ohci_pci.c optional ohci pci
dev/usb/controller/uhci.c optional uhci
dev/usb/controller/uhci_pci.c optional uhci pci
dev/usb/controller/xhci.c optional xhci
dev/usb/controller/xhci_pci.c optional xhci pci
dev/usb/controller/saf1761_otg.c optional saf1761otg
dev/usb/controller/saf1761_otg_fdt.c optional saf1761otg fdt
dev/usb/controller/uss820dci.c optional uss820dci
dev/usb/controller/usb_controller.c optional usb
#
# USB storage drivers
#
dev/usb/storage/cfumass.c optional cfumass ctl
dev/usb/storage/umass.c optional umass
dev/usb/storage/urio.c optional urio
dev/usb/storage/ustorage_fs.c optional usfs
#
# USB core
#
dev/usb/usb_busdma.c optional usb
dev/usb/usb_core.c optional usb
dev/usb/usb_debug.c optional usb
dev/usb/usb_dev.c optional usb
dev/usb/usb_device.c optional usb
dev/usb/usb_dynamic.c optional usb
dev/usb/usb_error.c optional usb
dev/usb/usb_fdt_support.c optional usb fdt
dev/usb/usb_generic.c optional usb
dev/usb/usb_handle_request.c optional usb
dev/usb/usb_hid.c optional usb
dev/usb/usb_hub.c optional usb
dev/usb/usb_hub_acpi.c optional uacpi acpi
dev/usb/usb_if.m optional usb
dev/usb/usb_lookup.c optional usb
dev/usb/usb_mbuf.c optional usb
dev/usb/usb_msctest.c optional usb
dev/usb/usb_parse.c optional usb
dev/usb/usb_pf.c optional usb
dev/usb/usb_process.c optional usb
dev/usb/usb_request.c optional usb
dev/usb/usb_transfer.c optional usb
dev/usb/usb_util.c optional usb
#
# USB network drivers
#
dev/usb/net/if_aue.c optional aue
dev/usb/net/if_axe.c optional axe
dev/usb/net/if_axge.c optional axge
dev/usb/net/if_cdce.c optional cdce
dev/usb/net/if_cdceem.c optional cdceem
dev/usb/net/if_cue.c optional cue
dev/usb/net/if_ipheth.c optional ipheth
dev/usb/net/if_kue.c optional kue
dev/usb/net/if_mos.c optional mos
dev/usb/net/if_muge.c optional muge
dev/usb/net/if_rue.c optional rue
dev/usb/net/if_smsc.c optional smsc
dev/usb/net/if_udav.c optional udav
dev/usb/net/if_ure.c optional ure
dev/usb/net/if_usie.c optional usie
dev/usb/net/if_urndis.c optional urndis
dev/usb/net/ruephy.c optional rue
dev/usb/net/usb_ethernet.c optional uether | aue | axe | axge | cdce | \
cdceem | cue | ipheth | kue | mos | \
rue | smsc | udav | ure | urndis | muge
dev/usb/net/uhso.c optional uhso
#
# USB WLAN drivers
#
dev/usb/wlan/if_rsu.c optional rsu
rsu-rtl8712fw.c optional rsu-rtl8712fw | rsufw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rsu-rtl8712fw.fw:rsu-rtl8712fw:120 -mrsu-rtl8712fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rsu-rtl8712fw.c"
rsu-rtl8712fw.fwo optional rsu-rtl8712fw | rsufw \
dependency "rsu-rtl8712fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rsu-rtl8712fw.fwo"
rsu-rtl8712fw.fw optional rsu-rtl8712fw | rsufw \
dependency "$S/contrib/dev/rsu/rsu-rtl8712fw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rsu-rtl8712fw.fw"
dev/usb/wlan/if_rum.c optional rum
dev/usb/wlan/if_run.c optional run
runfw.c optional runfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk run.fw:runfw -mrunfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "runfw.c"
runfw.fwo optional runfw \
dependency "run.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "runfw.fwo"
run.fw optional runfw \
dependency "$S/contrib/dev/run/rt2870.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "run.fw"
dev/usb/wlan/if_uath.c optional uath
dev/usb/wlan/if_upgt.c optional upgt
dev/usb/wlan/if_ural.c optional ural
dev/usb/wlan/if_urtw.c optional urtw
dev/usb/wlan/if_zyd.c optional zyd
#
# USB serial and parallel port drivers
#
dev/usb/serial/u3g.c optional u3g
dev/usb/serial/uark.c optional uark
dev/usb/serial/ubsa.c optional ubsa
dev/usb/serial/ubser.c optional ubser
dev/usb/serial/uchcom.c optional uchcom
dev/usb/serial/ucycom.c optional ucycom
dev/usb/serial/ufoma.c optional ufoma
dev/usb/serial/uftdi.c optional uftdi
dev/usb/serial/ugensa.c optional ugensa
dev/usb/serial/uipaq.c optional uipaq
dev/usb/serial/ulpt.c optional ulpt
dev/usb/serial/umcs.c optional umcs
dev/usb/serial/umct.c optional umct
dev/usb/serial/umodem.c optional umodem
dev/usb/serial/umoscom.c optional umoscom
dev/usb/serial/uplcom.c optional uplcom
dev/usb/serial/uslcom.c optional uslcom
dev/usb/serial/uvisor.c optional uvisor
dev/usb/serial/uvscom.c optional uvscom
dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \
uchcom | ucycom | ufoma | uftdi | \
ugensa | uipaq | umcs | umct | \
umodem | umoscom | uplcom | usie | \
uslcom | uvisor | uvscom
#
# USB misc drivers
#
dev/usb/misc/cp2112.c optional cp2112
dev/usb/misc/udbp.c optional udbp
dev/usb/misc/ugold.c optional ugold
dev/usb/misc/uled.c optional uled
#
# USB input drivers
#
dev/usb/input/atp.c optional atp
dev/usb/input/uep.c optional uep
dev/usb/input/uhid.c optional uhid
dev/usb/input/uhid_snes.c optional uhid_snes
dev/usb/input/ukbd.c optional ukbd
dev/usb/input/ums.c optional ums
dev/usb/input/usbhid.c optional usbhid
dev/usb/input/wmt.c optional wmt
dev/usb/input/wsp.c optional wsp
#
# USB quirks
#
dev/usb/quirk/usb_quirk.c optional usb
#
# USB templates
#
dev/usb/template/usb_template.c optional usb_template
dev/usb/template/usb_template_audio.c optional usb_template
dev/usb/template/usb_template_cdce.c optional usb_template
dev/usb/template/usb_template_kbd.c optional usb_template
dev/usb/template/usb_template_modem.c optional usb_template
dev/usb/template/usb_template_mouse.c optional usb_template
dev/usb/template/usb_template_msc.c optional usb_template
dev/usb/template/usb_template_mtp.c optional usb_template
dev/usb/template/usb_template_phone.c optional usb_template
dev/usb/template/usb_template_serialnet.c optional usb_template
dev/usb/template/usb_template_midi.c optional usb_template
dev/usb/template/usb_template_multi.c optional usb_template
dev/usb/template/usb_template_cdceem.c optional usb_template
#
# USB video drivers
#
dev/usb/video/udl.c optional udl
#
# USB END
#
dev/videomode/videomode.c optional videomode
dev/videomode/edid.c optional videomode
dev/videomode/pickmode.c optional videomode
dev/videomode/vesagtf.c optional videomode
dev/veriexec/verified_exec.c optional mac_veriexec
dev/vge/if_vge.c optional vge
dev/viapm/viapm.c optional viapm pci
dev/virtio/virtio.c optional virtio
dev/virtio/virtqueue.c optional virtio
dev/virtio/virtio_bus_if.m optional virtio
dev/virtio/virtio_if.m optional virtio
dev/virtio/pci/virtio_pci.c optional virtio_pci
dev/virtio/pci/virtio_pci_if.m optional virtio_pci
dev/virtio/pci/virtio_pci_legacy.c optional virtio_pci
dev/virtio/pci/virtio_pci_modern.c optional virtio_pci
dev/virtio/mmio/virtio_mmio.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_acpi.c optional virtio_mmio acpi
dev/virtio/mmio/virtio_mmio_fdt.c optional virtio_mmio fdt
dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/block/virtio_blk.c optional virtio_blk
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
dev/virtio/random/virtio_random.c optional virtio_random
dev/virtio/console/virtio_console.c optional virtio_console
dev/vkbd/vkbd.c optional vkbd
dev/vmgenc/vmgenc_acpi.c optional acpi
dev/vmware/vmxnet3/if_vmx.c optional vmx
dev/vmware/vmci/vmci.c optional vmci
dev/vmware/vmci/vmci_datagram.c optional vmci
dev/vmware/vmci/vmci_doorbell.c optional vmci
dev/vmware/vmci/vmci_driver.c optional vmci
dev/vmware/vmci/vmci_event.c optional vmci
dev/vmware/vmci/vmci_hashtable.c optional vmci
dev/vmware/vmci/vmci_kernel_if.c optional vmci
dev/vmware/vmci/vmci_qpair.c optional vmci
dev/vmware/vmci/vmci_queue_pair.c optional vmci
dev/vmware/vmci/vmci_resource.c optional vmci
dev/vmware/pvscsi/pvscsi.c optional pvscsi
dev/vr/if_vr.c optional vr pci
dev/vt/colors/vt_termcolors.c optional vt
dev/vt/font/vt_font_default.c optional vt
dev/vt/font/vt_mouse_cursor.c optional vt
dev/vt/hw/efifb/efifb.c optional vt_efifb
dev/vt/hw/vbefb/vbefb.c optional vt_vbefb
dev/vt/hw/fb/vt_fb.c optional vt
dev/vt/hw/vga/vt_vga.c optional vt vt_vga
dev/vt/logo/logo_freebsd.c optional vt splash
dev/vt/logo/logo_beastie.c optional vt splash
dev/vt/vt_buf.c optional vt
dev/vt/vt_consolectl.c optional vt
dev/vt/vt_core.c optional vt
dev/vt/vt_cpulogos.c optional vt splash
dev/vt/vt_font.c optional vt
dev/vt/vt_sysmouse.c optional vt
dev/vte/if_vte.c optional vte pci
dev/watchdog/watchdog.c standard
dev/wpi/if_wpi.c optional wpi pci
wpifw.c optional wpifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "wpifw.c"
wpifw.fwo optional wpifw \
dependency "wpi.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "wpifw.fwo"
wpi.fw optional wpifw \
dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "wpi.fw"
dev/xdma/controller/pl330.c optional xdma pl330
dev/xdma/xdma.c optional xdma
dev/xdma/xdma_bank.c optional xdma
dev/xdma/xdma_bio.c optional xdma
dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt
dev/xdma/xdma_if.m optional xdma
dev/xdma/xdma_iommu.c optional xdma
dev/xdma/xdma_mbuf.c optional xdma
dev/xdma/xdma_queue.c optional xdma
dev/xdma/xdma_sg.c optional xdma
dev/xdma/xdma_sglist.c optional xdma
dev/xen/balloon/balloon.c optional xenhvm
dev/xen/blkfront/blkfront.c optional xenhvm
dev/xen/blkback/blkback.c optional xenhvm
dev/xen/bus/xenpv.c optional xenhvm
dev/xen/console/xen_console.c optional xenhvm
dev/xen/control/control.c optional xenhvm
dev/xen/cpu/xen_acpi_cpu.c optional xenhvm
dev/xen/efi/pvefi.c optional xenhvm efirt
dev/xen/grant_table/grant_table.c optional xenhvm
dev/xen/netback/netback.c optional xenhvm
dev/xen/netfront/netfront.c optional xenhvm
dev/xen/timer/timer.c optional xenhvm xentimer
dev/xen/xenpci/xenpci.c optional xenpci
dev/xen/xenstore/xenstore.c optional xenhvm
dev/xen/xenstore/xenstore_dev.c optional xenhvm
dev/xen/xenstore/xenstored_dev.c optional xenhvm
dev/xen/evtchn/evtchn_dev.c optional xenhvm
dev/xen/privcmd/privcmd.c optional xenhvm
dev/xen/gntdev/gntdev.c optional xenhvm
dev/xen/debug/debug.c optional xenhvm
dev/xl/if_xl.c optional xl pci
dev/xl/xlphy.c optional xl pci
fs/autofs/autofs.c optional autofs
fs/autofs/autofs_vfsops.c optional autofs
fs/autofs/autofs_vnops.c optional autofs
fs/deadfs/dead_vnops.c standard
fs/devfs/devfs_devs.c standard
fs/devfs/devfs_dir.c standard
fs/devfs/devfs_rule.c standard
fs/devfs/devfs_vfsops.c standard
fs/devfs/devfs_vnops.c standard
fs/fdescfs/fdesc_vfsops.c optional fdescfs
fs/fdescfs/fdesc_vnops.c optional fdescfs
fs/fifofs/fifo_vnops.c standard
fs/cuse/cuse.c optional cuse
fs/fuse/fuse_device.c optional fusefs
fs/fuse/fuse_file.c optional fusefs
fs/fuse/fuse_internal.c optional fusefs
fs/fuse/fuse_io.c optional fusefs
fs/fuse/fuse_ipc.c optional fusefs
fs/fuse/fuse_main.c optional fusefs
fs/fuse/fuse_node.c optional fusefs
fs/fuse/fuse_vfsops.c optional fusefs
fs/fuse/fuse_vnops.c optional fusefs
fs/mntfs/mntfs_vnops.c standard
fs/msdosfs/msdosfs_conv.c optional msdosfs
fs/msdosfs/msdosfs_denode.c optional msdosfs
fs/msdosfs/msdosfs_fat.c optional msdosfs
fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv
fs/msdosfs/msdosfs_lookup.c optional msdosfs
fs/msdosfs/msdosfs_vfsops.c optional msdosfs
fs/msdosfs/msdosfs_vnops.c optional msdosfs
fs/nfs/nfs_commonkrpc.c optional nfscl | nfslockd | nfsd
fs/nfs/nfs_commonsubs.c optional nfscl | nfslockd | nfsd
fs/nfs/nfs_commonport.c optional nfscl | nfslockd | nfsd
fs/nfs/nfs_commonacl.c optional nfscl | nfslockd | nfsd
fs/nfsclient/nfs_clcomsubs.c optional nfscl
fs/nfsclient/nfs_clsubs.c optional nfscl
fs/nfsclient/nfs_clstate.c optional nfscl
fs/nfsclient/nfs_clkrpc.c optional nfscl
fs/nfsclient/nfs_clrpcops.c optional nfscl
fs/nfsclient/nfs_clvnops.c optional nfscl
fs/nfsclient/nfs_clnode.c optional nfscl
fs/nfsclient/nfs_clvfsops.c optional nfscl
fs/nfsclient/nfs_clport.c optional nfscl
fs/nfsclient/nfs_clbio.c optional nfscl
fs/nfsclient/nfs_clnfsiod.c optional nfscl
fs/nfsserver/nfs_fha_new.c optional nfsd inet
fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet
fs/nfsserver/nfs_nfsdsubs.c optional nfsd inet
fs/nfsserver/nfs_nfsdstate.c optional nfsd inet
fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet
fs/nfsserver/nfs_nfsdserv.c optional nfsd inet
fs/nfsserver/nfs_nfsdport.c optional nfsd inet
fs/nfsserver/nfs_nfsdcache.c optional nfsd inet
fs/nullfs/null_subr.c optional nullfs
fs/nullfs/null_vfsops.c optional nullfs
fs/nullfs/null_vnops.c optional nullfs
fs/procfs/procfs.c optional procfs
fs/procfs/procfs_dbregs.c optional procfs
fs/procfs/procfs_fpregs.c optional procfs
fs/procfs/procfs_map.c optional procfs
fs/procfs/procfs_mem.c optional procfs
fs/procfs/procfs_note.c optional procfs
fs/procfs/procfs_osrel.c optional procfs
fs/procfs/procfs_regs.c optional procfs
fs/procfs/procfs_rlimit.c optional procfs
fs/procfs/procfs_status.c optional procfs
fs/procfs/procfs_type.c optional procfs
fs/pseudofs/pseudofs.c optional pseudofs
fs/pseudofs/pseudofs_fileno.c optional pseudofs
fs/pseudofs/pseudofs_vncache.c optional pseudofs
fs/pseudofs/pseudofs_vnops.c optional pseudofs
fs/smbfs/smbfs_io.c optional smbfs
fs/smbfs/smbfs_node.c optional smbfs
fs/smbfs/smbfs_smb.c optional smbfs
fs/smbfs/smbfs_subr.c optional smbfs
fs/smbfs/smbfs_vfsops.c optional smbfs
fs/smbfs/smbfs_vnops.c optional smbfs
fs/udf/osta.c optional udf
fs/udf/udf_iconv.c optional udf_iconv
fs/udf/udf_vfsops.c optional udf
fs/udf/udf_vnops.c optional udf
fs/unionfs/union_subr.c optional unionfs
fs/unionfs/union_vfsops.c optional unionfs
fs/unionfs/union_vnops.c optional unionfs
fs/tmpfs/tmpfs_vnops.c optional tmpfs
fs/tmpfs/tmpfs_fifoops.c optional tmpfs
fs/tmpfs/tmpfs_vfsops.c optional tmpfs
fs/tmpfs/tmpfs_subr.c optional tmpfs
gdb/gdb_cons.c optional gdb
gdb/gdb_main.c optional gdb
gdb/gdb_packet.c optional gdb
gdb/netgdb.c optional ddb debugnet gdb netgdb inet
geom/bde/g_bde.c optional geom_bde
geom/bde/g_bde_crypt.c optional geom_bde
geom/bde/g_bde_lock.c optional geom_bde
geom/bde/g_bde_work.c optional geom_bde
geom/cache/g_cache.c optional geom_cache
geom/concat/g_concat.c optional geom_concat
geom/eli/g_eli.c optional geom_eli
geom/eli/g_eli_crypto.c optional geom_eli
geom/eli/g_eli_ctl.c optional geom_eli
geom/eli/g_eli_hmac.c optional geom_eli
geom/eli/g_eli_integrity.c optional geom_eli
geom/eli/g_eli_key.c optional geom_eli
geom/eli/g_eli_key_cache.c optional geom_eli
geom/eli/g_eli_privacy.c optional geom_eli
geom/eli/pkcs5v2.c optional geom_eli
geom/gate/g_gate.c optional geom_gate
geom/geom_bsd_enc.c optional geom_part_bsd
geom/geom_ccd.c optional ccd | geom_ccd
geom/geom_ctl.c standard
geom/geom_dev.c standard
geom/geom_disk.c standard
geom/geom_dump.c standard
geom/geom_event.c standard
geom/geom_flashmap.c optional fdt cfi | fdt mx25l | mmcsd | fdt n25q | fdt at45d
geom/geom_io.c standard
geom/geom_kern.c standard
geom/geom_map.c optional geom_map
geom/geom_redboot.c optional geom_redboot
geom/geom_slice.c standard
geom/geom_subr.c standard
geom/geom_vfs.c standard
geom/journal/g_journal.c optional geom_journal
geom/journal/g_journal_ufs.c optional geom_journal
geom/label/g_label.c optional geom_label | geom_label_gpt
geom/label/g_label_ext2fs.c optional geom_label
geom/label/g_label_flashmap.c optional geom_label
geom/label/g_label_iso9660.c optional geom_label
geom/label/g_label_msdosfs.c optional geom_label
geom/label/g_label_ntfs.c optional geom_label
geom/label/g_label_reiserfs.c optional geom_label
geom/label/g_label_ufs.c optional geom_label
geom/label/g_label_gpt.c optional geom_label | geom_label_gpt
geom/label/g_label_disk_ident.c optional geom_label
geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm
geom/mirror/g_mirror.c optional geom_mirror
geom/mirror/g_mirror_ctl.c optional geom_mirror
geom/mountver/g_mountver.c optional geom_mountver
geom/multipath/g_multipath.c optional geom_multipath
geom/nop/g_nop.c optional geom_nop
geom/part/g_part.c standard
geom/part/g_part_if.m standard
geom/part/g_part_apm.c optional geom_part_apm
geom/part/g_part_bsd.c optional geom_part_bsd
geom/part/g_part_bsd64.c optional geom_part_bsd64
geom/part/g_part_ebr.c optional geom_part_ebr
geom/part/g_part_gpt.c optional geom_part_gpt
geom/part/g_part_ldm.c optional geom_part_ldm
geom/part/g_part_mbr.c optional geom_part_mbr
geom/part/g_part_vtoc8.c optional geom_part_vtoc8
geom/raid/g_raid.c optional geom_raid
geom/raid/g_raid_ctl.c optional geom_raid
geom/raid/g_raid_md_if.m optional geom_raid
geom/raid/g_raid_tr_if.m optional geom_raid
geom/raid/md_ddf.c optional geom_raid
geom/raid/md_intel.c optional geom_raid
geom/raid/md_jmicron.c optional geom_raid
geom/raid/md_nvidia.c optional geom_raid
geom/raid/md_promise.c optional geom_raid
geom/raid/md_sii.c optional geom_raid
geom/raid/tr_concat.c optional geom_raid
geom/raid/tr_raid0.c optional geom_raid
geom/raid/tr_raid1.c optional geom_raid
geom/raid/tr_raid1e.c optional geom_raid
geom/raid/tr_raid5.c optional geom_raid
geom/raid3/g_raid3.c optional geom_raid3
geom/raid3/g_raid3_ctl.c optional geom_raid3
geom/shsec/g_shsec.c optional geom_shsec
geom/stripe/g_stripe.c optional geom_stripe
geom/union/g_union.c optional geom_union
geom/uzip/g_uzip.c optional geom_uzip
geom/uzip/g_uzip_lzma.c optional geom_uzip
geom/uzip/g_uzip_wrkthr.c optional geom_uzip
geom/uzip/g_uzip_zlib.c optional geom_uzip
geom/uzip/g_uzip_zstd.c optional geom_uzip zstdio \
compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd"
geom/vinum/geom_vinum.c optional geom_vinum
geom/vinum/geom_vinum_create.c optional geom_vinum
geom/vinum/geom_vinum_drive.c optional geom_vinum
geom/vinum/geom_vinum_plex.c optional geom_vinum
geom/vinum/geom_vinum_volume.c optional geom_vinum
geom/vinum/geom_vinum_subr.c optional geom_vinum
geom/vinum/geom_vinum_raid5.c optional geom_vinum
geom/vinum/geom_vinum_share.c optional geom_vinum
geom/vinum/geom_vinum_list.c optional geom_vinum
geom/vinum/geom_vinum_rm.c optional geom_vinum
geom/vinum/geom_vinum_init.c optional geom_vinum
geom/vinum/geom_vinum_state.c optional geom_vinum
geom/vinum/geom_vinum_rename.c optional geom_vinum
geom/vinum/geom_vinum_move.c optional geom_vinum
geom/vinum/geom_vinum_events.c optional geom_vinum
geom/virstor/binstream.c optional geom_virstor
geom/virstor/g_virstor.c optional geom_virstor
geom/virstor/g_virstor_md.c optional geom_virstor
geom/zero/g_zero.c optional geom_zero
fs/ext2fs/ext2_acl.c optional ext2fs
fs/ext2fs/ext2_alloc.c optional ext2fs
fs/ext2fs/ext2_balloc.c optional ext2fs
fs/ext2fs/ext2_bmap.c optional ext2fs
fs/ext2fs/ext2_csum.c optional ext2fs
fs/ext2fs/ext2_extattr.c optional ext2fs
fs/ext2fs/ext2_extents.c optional ext2fs
fs/ext2fs/ext2_inode.c optional ext2fs
fs/ext2fs/ext2_inode_cnv.c optional ext2fs
fs/ext2fs/ext2_hash.c optional ext2fs
fs/ext2fs/ext2_htree.c optional ext2fs
fs/ext2fs/ext2_lookup.c optional ext2fs
fs/ext2fs/ext2_subr.c optional ext2fs
fs/ext2fs/ext2_vfsops.c optional ext2fs
fs/ext2fs/ext2_vnops.c optional ext2fs
#
isa/isa_if.m standard
isa/isa_common.c optional isa
isa/isahint.c optional isa
isa/pnp.c optional isa isapnp
isa/pnpparse.c optional isa isapnp
fs/cd9660/cd9660_bmap.c optional cd9660
fs/cd9660/cd9660_lookup.c optional cd9660
fs/cd9660/cd9660_node.c optional cd9660
fs/cd9660/cd9660_rrip.c optional cd9660
fs/cd9660/cd9660_util.c optional cd9660
fs/cd9660/cd9660_vfsops.c optional cd9660
fs/cd9660/cd9660_vnops.c optional cd9660
fs/cd9660/cd9660_iconv.c optional cd9660_iconv
gnu/gcov/gcc_4_7.c optional gcov \
warning "kernel contains GPL licensed gcov support"
gnu/gcov/gcov_fs.c optional gcov lindebugfs \
compile-with "${LINUXKPI_C}"
gnu/gcov/gcov_subr.c optional gcov
kern/bus_if.m standard
kern/clock_if.m standard
kern/cpufreq_if.m standard
kern/device_if.m standard
kern/imgact_binmisc.c optional imgact_binmisc
kern/imgact_elf.c standard
kern/imgact_elf32.c optional compat_freebsd32
kern/imgact_shell.c standard
kern/init_main.c standard
kern/init_sysent.c standard
kern/ksched.c optional _kposix_priority_scheduling
kern/kern_acct.c standard
kern/kern_alq.c optional alq
kern/kern_boottrace.c standard
kern/kern_clock.c standard
kern/kern_clocksource.c standard
kern/kern_condvar.c standard
kern/kern_conf.c standard
kern/kern_cons.c standard
kern/kern_cpu.c standard
kern/kern_cpuset.c standard
kern/kern_context.c standard
kern/kern_descrip.c standard
kern/kern_devctl.c standard
kern/kern_dtrace.c optional kdtrace_hooks
kern/kern_dump.c standard
kern/kern_environment.c standard
kern/kern_et.c standard
kern/kern_event.c standard
kern/kern_exec.c standard
kern/kern_exit.c standard
kern/kern_fail.c standard
kern/kern_ffclock.c standard
kern/kern_fork.c standard
kern/kern_hhook.c standard
kern/kern_idle.c standard
kern/kern_intr.c standard
kern/kern_jail.c standard
kern/kern_kcov.c optional kcov \
compile-with "${NORMAL_C:N-fsanitize*} ${NORMAL_C:M-fsanitize=kernel-memory}"
kern/kern_khelp.c standard
kern/kern_kthread.c standard
kern/kern_ktr.c optional ktr
kern/kern_ktrace.c standard
kern/kern_linker.c standard
kern/kern_lock.c standard
kern/kern_lockf.c standard
kern/kern_lockstat.c optional kdtrace_hooks
kern/kern_loginclass.c standard
kern/kern_malloc.c standard
kern/kern_mbuf.c standard
kern/kern_mib.c standard
kern/kern_module.c standard
kern/kern_mtxpool.c standard
kern/kern_mutex.c standard
kern/kern_ntptime.c standard
kern/kern_osd.c standard
kern/kern_physio.c standard
kern/kern_pmc.c standard
kern/kern_poll.c optional device_polling
kern/kern_priv.c standard
kern/kern_proc.c standard
kern/kern_procctl.c standard
kern/kern_prot.c standard
kern/kern_racct.c standard
kern/kern_rangelock.c standard
kern/kern_rctl.c standard
kern/kern_resource.c standard
kern/kern_rmlock.c standard
kern/kern_rwlock.c standard
kern/kern_sdt.c optional kdtrace_hooks
kern/kern_sema.c standard
kern/kern_sendfile.c standard
kern/kern_sharedpage.c standard
kern/kern_shutdown.c standard
kern/kern_sig.c standard
kern/kern_switch.c standard
kern/kern_sx.c standard
kern/kern_synch.c standard
kern/kern_syscalls.c standard
kern/kern_sysctl.c standard
kern/kern_tc.c standard
kern/kern_thr.c standard
kern/kern_thread.c standard
kern/kern_time.c standard
kern/kern_timeout.c standard
kern/kern_tslog.c optional tslog
kern/kern_ubsan.c optional kubsan
kern/kern_umtx.c standard
kern/kern_uuid.c standard
kern/kern_vnodedumper.c standard
kern/kern_xxx.c standard
kern/link_elf.c standard
kern/linker_if.m standard
kern/md4c.c optional netsmb
kern/md5c.c standard
kern/p1003_1b.c standard
kern/posix4_mib.c standard
kern/sched_4bsd.c optional sched_4bsd
kern/sched_ule.c optional sched_ule
kern/serdev_if.m standard
kern/stack_protector.c standard \
compile-with "${NORMAL_C:N-fstack-protector*}"
kern/subr_acl_nfs4.c optional ufs_acl | zfs
kern/subr_acl_posix1e.c optional ufs_acl
kern/subr_asan.c optional kasan \
compile-with "${NORMAL_C:N-fsanitize*}"
kern/subr_autoconf.c standard
kern/subr_blist.c standard
kern/subr_boot.c standard
kern/subr_bus.c standard
kern/subr_bus_dma.c standard
kern/subr_bufring.c standard
kern/subr_capability.c standard
kern/subr_clock.c standard
kern/subr_compressor.c standard \
compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd"
kern/subr_coverage.c optional coverage \
compile-with "${NORMAL_C:N-fsanitize*}"
kern/subr_counter.c standard
kern/subr_csan.c optional kcsan \
compile-with "${NORMAL_C:N-fsanitize*}"
kern/subr_devstat.c standard
kern/subr_disk.c standard
kern/subr_early.c standard
kern/subr_epoch.c standard
kern/subr_eventhandler.c standard
kern/subr_fattime.c standard
kern/subr_firmware.c optional firmware
kern/subr_filter.c standard
kern/subr_gtaskqueue.c standard
kern/subr_hash.c standard
kern/subr_hints.c standard
kern/subr_kdb.c standard
kern/subr_kobj.c standard
kern/subr_lock.c standard
kern/subr_log.c standard
kern/subr_mchain.c optional libmchain
kern/subr_module.c standard
kern/subr_msan.c optional kmsan \
compile-with "${NORMAL_C:N-fsanitize*}"
kern/subr_msgbuf.c standard
kern/subr_param.c standard
kern/subr_pcpu.c standard
kern/subr_pctrie.c standard
kern/subr_pidctrl.c standard
kern/subr_power.c standard
kern/subr_prf.c standard
kern/subr_prng.c standard
kern/subr_prof.c standard
kern/subr_rangeset.c standard
kern/subr_rman.c standard
kern/subr_rtc.c standard
kern/subr_sbuf.c standard
kern/subr_scanf.c standard
kern/subr_sglist.c standard
kern/subr_sleepqueue.c standard
kern/subr_smp.c standard
kern/subr_smr.c standard
kern/subr_stack.c optional ddb | stack | ktr
kern/subr_stats.c optional stats
kern/subr_taskqueue.c standard
kern/subr_terminal.c optional vt
kern/subr_trap.c standard
kern/subr_turnstile.c standard
kern/subr_uio.c standard
kern/subr_unit.c standard
kern/subr_vmem.c standard
kern/subr_witness.c optional witness
kern/sys_capability.c standard
kern/sys_eventfd.c standard
kern/sys_generic.c standard
kern/sys_getrandom.c standard
kern/sys_pipe.c standard
kern/sys_procdesc.c standard
kern/sys_process.c standard
kern/sys_socket.c standard
kern/syscalls.c standard
kern/sysv_ipc.c standard
kern/sysv_msg.c optional sysvmsg
kern/sysv_sem.c optional sysvsem
kern/sysv_shm.c optional sysvshm
kern/tty.c standard
kern/tty_compat.c optional compat_43tty
kern/tty_info.c standard
kern/tty_inq.c standard
kern/tty_outq.c standard
kern/tty_pts.c standard
kern/tty_tty.c standard
kern/tty_ttydisc.c standard
kern/uipc_accf.c standard
kern/uipc_debug.c optional ddb
kern/uipc_domain.c standard
kern/uipc_ktls.c optional kern_tls
kern/uipc_mbuf.c standard
kern/uipc_mbuf2.c standard
kern/uipc_mbufhash.c standard
kern/uipc_mqueue.c optional p1003_1b_mqueue
kern/uipc_sem.c optional p1003_1b_semaphores
kern/uipc_shm.c standard
kern/uipc_sockbuf.c standard
kern/uipc_socket.c standard
kern/uipc_syscalls.c standard
kern/uipc_usrreq.c standard
kern/vfs_acl.c standard
kern/vfs_aio.c standard
kern/vfs_bio.c standard
kern/vfs_cache.c standard
kern/vfs_cluster.c standard
kern/vfs_default.c standard
kern/vfs_export.c standard
kern/vfs_extattr.c standard
kern/vfs_hash.c standard
kern/vfs_init.c standard
kern/vfs_lookup.c standard
kern/vfs_mount.c standard
kern/vfs_mountroot.c standard
kern/vfs_subr.c standard
kern/vfs_syscalls.c standard
kern/vfs_vnops.c standard
#
# Kernel GSS-API
#
gssd.h optional kgssapi \
dependency "$S/kgssapi/gssd.x" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \
no-obj no-implicit-rule before-depend local \
clean "gssd.h"
gssd_xdr.c optional kgssapi \
dependency "$S/kgssapi/gssd.x gssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \
no-ctfconvert no-implicit-rule before-depend local \
clean "gssd_xdr.c"
gssd_clnt.c optional kgssapi \
dependency "$S/kgssapi/gssd.x gssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \
no-ctfconvert no-implicit-rule before-depend local \
clean "gssd_clnt.c"
kgssapi/gss_accept_sec_context.c optional kgssapi
kgssapi/gss_add_oid_set_member.c optional kgssapi
kgssapi/gss_acquire_cred.c optional kgssapi
kgssapi/gss_canonicalize_name.c optional kgssapi
kgssapi/gss_create_empty_oid_set.c optional kgssapi
kgssapi/gss_delete_sec_context.c optional kgssapi
kgssapi/gss_display_status.c optional kgssapi
kgssapi/gss_export_name.c optional kgssapi
kgssapi/gss_get_mic.c optional kgssapi
kgssapi/gss_init_sec_context.c optional kgssapi
kgssapi/gss_impl.c optional kgssapi
kgssapi/gss_import_name.c optional kgssapi
kgssapi/gss_names.c optional kgssapi
kgssapi/gss_pname_to_uid.c optional kgssapi
kgssapi/gss_release_buffer.c optional kgssapi
kgssapi/gss_release_cred.c optional kgssapi
kgssapi/gss_release_name.c optional kgssapi
kgssapi/gss_release_oid_set.c optional kgssapi
kgssapi/gss_set_cred_option.c optional kgssapi
kgssapi/gss_test_oid_set_member.c optional kgssapi
kgssapi/gss_unwrap.c optional kgssapi
kgssapi/gss_verify_mic.c optional kgssapi
kgssapi/gss_wrap.c optional kgssapi
kgssapi/gss_wrap_size_limit.c optional kgssapi
kgssapi/gssd_prot.c optional kgssapi
kgssapi/krb5/krb5_mech.c optional kgssapi
kgssapi/krb5/kcrypto.c optional kgssapi
kgssapi/krb5/kcrypto_aes.c optional kgssapi
kgssapi/kgss_if.m optional kgssapi
kgssapi/gsstest.c optional kgssapi_debug
# These files in libkern/ are those needed by all architectures. Some
# of the files in libkern/ are only needed on some architectures, e.g.,
# libkern/divdi3.c is needed by i386 but not alpha. Also, some of these
# routines may be optimized for a particular platform. In either case,
# the file should be moved to conf/files.<arch> from here.
#
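# (A machine-dependent libkern file would simply be listed in the
# per-architecture file instead; a hypothetical sys/conf/files.<arch> entry
# would look like:
#	libkern/divdi3.c	standard
# The entries below are the machine-independent remainder.)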
libkern/arc4random.c standard
libkern/arc4random_uniform.c standard
libkern/asprintf.c standard
libkern/bcd.c standard
libkern/bsearch.c standard
libkern/explicit_bzero.c standard
libkern/fnmatch.c standard
libkern/gsb_crc32.c standard
libkern/iconv.c optional libiconv
libkern/iconv_converter_if.m optional libiconv
libkern/iconv_ucs.c optional libiconv
libkern/iconv_xlat.c optional libiconv
libkern/iconv_xlat16.c optional libiconv
libkern/inet_aton.c standard
libkern/inet_ntoa.c standard
libkern/inet_ntop.c standard
libkern/inet_pton.c standard
libkern/jenkins_hash.c standard
libkern/murmur3_32.c standard
libkern/memcchr.c standard
libkern/memchr.c standard
libkern/memmem.c optional gdb
libkern/qsort.c standard
libkern/qsort_r.c standard
libkern/random.c standard
libkern/scanc.c standard
libkern/strcasecmp.c standard
libkern/strcasestr.c standard
libkern/strcat.c standard
libkern/strchr.c standard
libkern/strchrnul.c optional gdb
libkern/strcpy.c standard
libkern/strcspn.c standard
libkern/strdup.c standard
libkern/strndup.c standard
libkern/strlcat.c standard
libkern/strlcpy.c standard
libkern/strncat.c standard
libkern/strncpy.c standard
libkern/strnlen.c standard
libkern/strnstr.c standard
libkern/strrchr.c standard
libkern/strsep.c standard
libkern/strspn.c standard
libkern/strstr.c standard
libkern/strtol.c standard
libkern/strtoq.c standard
libkern/strtoul.c standard
libkern/strtouq.c standard
libkern/strvalid.c standard
libkern/timingsafe_bcmp.c standard
contrib/zlib/adler32.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
contrib/zlib/compress.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${NORMAL_C} -Wno-cast-qual"
contrib/zlib/crc32.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${NORMAL_C} ${NO_WSTRICT_PROTOTYPES}"
contrib/zlib/deflate.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${NORMAL_C} -Wno-cast-qual"
contrib/zlib/inffast.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
contrib/zlib/inflate.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
contrib/zlib/inftrees.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
contrib/zlib/trees.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${NORMAL_C} ${NO_WSTRICT_PROTOTYPES}"
contrib/zlib/uncompr.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${NORMAL_C} -Wno-cast-qual"
contrib/zlib/zutil.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${NORMAL_C} ${NO_WSTRICT_PROTOTYPES}"
dev/zlib/zlib_mod.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
dev/zlib/zcalloc.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
net/altq/altq_cbq.c optional altq
net/altq/altq_codel.c optional altq
net/altq/altq_hfsc.c optional altq
net/altq/altq_fairq.c optional altq
net/altq/altq_priq.c optional altq
net/altq/altq_red.c optional altq
net/altq/altq_rio.c optional altq
net/altq/altq_rmclass.c optional altq
net/altq/altq_subr.c optional altq
net/bpf.c standard
net/bpf_buffer.c optional bpf
net/bpf_jitter.c optional bpf_jitter
net/bpf_filter.c optional bpf | netgraph_bpf
net/bpf_zerocopy.c optional bpf
net/bridgestp.c optional bridge | if_bridge
net/ieee8023ad_lacp.c optional lagg
net/if.c standard
net/if_bridge.c optional bridge inet | if_bridge inet
net/if_clone.c standard
net/if_dead.c standard
net/if_disc.c optional disc
net/if_edsc.c optional edsc
net/if_enc.c optional enc inet | enc inet6
net/if_epair.c optional epair
net/if_ethersubr.c optional ether
net/if_fwsubr.c optional fwip
net/if_gif.c optional gif inet | gif inet6 | \
netgraph_gif inet | netgraph_gif inet6
net/if_gre.c optional gre inet | gre inet6
net/if_ipsec.c optional inet ipsec | inet6 ipsec
net/if_lagg.c optional lagg
net/if_loop.c optional loop
net/if_llatbl.c standard
net/if_me.c optional me inet
net/if_media.c standard
net/if_mib.c standard
net/if_ovpn.c optional ovpn inet | ovpn inet6
net/if_stf.c optional stf inet inet6
net/if_tuntap.c optional tuntap
net/if_vlan.c optional vlan
net/if_vxlan.c optional vxlan inet | vxlan inet6
net/ifdi_if.m optional ether pci iflib
net/iflib.c optional ether pci iflib
net/iflib_clone.c optional ether pci iflib
net/mp_ring.c optional ether iflib
net/mppcc.c optional netgraph_mppc_compression
net/mppcd.c optional netgraph_mppc_compression
net/netisr.c standard
net/debugnet.c optional inet debugnet
net/debugnet_inet.c optional inet debugnet
net/pfil.c optional ether | inet
net/radix.c standard
net/route.c standard
net/route/nhgrp.c optional route_mpath
net/route/nhgrp_ctl.c optional route_mpath
net/route/nhop.c standard
net/route/nhop_ctl.c standard
net/route/nhop_utils.c standard
net/route/fib_algo.c optional fib_algo
net/route/route_ctl.c standard
net/route/route_ddb.c optional ddb
net/route/route_helpers.c standard
net/route/route_ifaddrs.c standard
net/route/route_rtentry.c standard
net/route/route_subscription.c standard
net/route/route_tables.c standard
net/route/route_temporal.c standard
net/rss_config.c optional inet rss | inet6 rss
net/rtsock.c standard
net/slcompress.c optional netgraph_vjc
net/toeplitz.c optional inet rss | inet6 rss | route_mpath
net/vnet.c optional vimage
net80211/ieee80211.c optional wlan
net80211/ieee80211_acl.c optional wlan wlan_acl
net80211/ieee80211_action.c optional wlan
net80211/ieee80211_adhoc.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_ageq.c optional wlan
net80211/ieee80211_amrr.c optional wlan | wlan_amrr
net80211/ieee80211_crypto.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_crypto_ccmp.c optional wlan wlan_ccmp
net80211/ieee80211_crypto_none.c optional wlan
net80211/ieee80211_crypto_tkip.c optional wlan wlan_tkip
net80211/ieee80211_crypto_wep.c optional wlan wlan_wep
net80211/ieee80211_ddb.c optional wlan ddb
net80211/ieee80211_dfs.c optional wlan
net80211/ieee80211_freebsd.c optional wlan
net80211/ieee80211_hostap.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_ht.c optional wlan
net80211/ieee80211_hwmp.c optional wlan ieee80211_support_mesh
net80211/ieee80211_input.c optional wlan
net80211/ieee80211_ioctl.c optional wlan
net80211/ieee80211_mesh.c optional wlan ieee80211_support_mesh \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_monitor.c optional wlan
net80211/ieee80211_node.c optional wlan
net80211/ieee80211_output.c optional wlan
net80211/ieee80211_phy.c optional wlan
net80211/ieee80211_power.c optional wlan
net80211/ieee80211_proto.c optional wlan
net80211/ieee80211_radiotap.c optional wlan
net80211/ieee80211_ratectl.c optional wlan
net80211/ieee80211_ratectl_none.c optional wlan
net80211/ieee80211_regdomain.c optional wlan
net80211/ieee80211_rssadapt.c optional wlan wlan_rssadapt
net80211/ieee80211_scan.c optional wlan
net80211/ieee80211_scan_sta.c optional wlan
net80211/ieee80211_sta.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_superg.c optional wlan ieee80211_support_superg
net80211/ieee80211_scan_sw.c optional wlan
net80211/ieee80211_tdma.c optional wlan ieee80211_support_tdma
net80211/ieee80211_vht.c optional wlan
net80211/ieee80211_wds.c optional wlan
net80211/ieee80211_xauth.c optional wlan wlan_xauth
net80211/ieee80211_alq.c optional wlan ieee80211_alq
netgraph/atm/ccatm/ng_ccatm.c optional ngatm_ccatm \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/ngatmbase.c optional ngatm_atmbase \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/sscfu/ng_sscfu.c optional ngatm_sscfu \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/sscop/ng_sscop.c optional ngatm_sscop \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/uni/ng_uni.c optional ngatm_uni \
compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth
netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt usb
netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c optional netgraph_bluetooth_ubt usb
netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw usb
netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci
netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket
netgraph/netflow/netflow.c optional netgraph_netflow
netgraph/netflow/netflow_v9.c optional netgraph_netflow
netgraph/netflow/ng_netflow.c optional netgraph_netflow
netgraph/ng_UI.c optional netgraph_UI
netgraph/ng_async.c optional netgraph_async
netgraph/ng_atmllc.c optional netgraph_atmllc
netgraph/ng_base.c optional netgraph
netgraph/ng_bpf.c optional netgraph_bpf
netgraph/ng_bridge.c optional netgraph_bridge
netgraph/ng_car.c optional netgraph_car
netgraph/ng_checksum.c optional netgraph_checksum
netgraph/ng_cisco.c optional netgraph_cisco
netgraph/ng_deflate.c optional netgraph_deflate
netgraph/ng_device.c optional netgraph_device
netgraph/ng_echo.c optional netgraph_echo
netgraph/ng_eiface.c optional netgraph_eiface
netgraph/ng_ether.c optional netgraph_ether
netgraph/ng_ether_echo.c optional netgraph_ether_echo
netgraph/ng_frame_relay.c optional netgraph_frame_relay
netgraph/ng_gif.c optional netgraph_gif inet6 | netgraph_gif inet
netgraph/ng_gif_demux.c optional netgraph_gif_demux
netgraph/ng_hole.c optional netgraph_hole
netgraph/ng_iface.c optional netgraph_iface
netgraph/ng_ip_input.c optional netgraph_ip_input
netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall
netgraph/ng_ksocket.c optional netgraph_ksocket
netgraph/ng_l2tp.c optional netgraph_l2tp
netgraph/ng_lmi.c optional netgraph_lmi
netgraph/ng_macfilter.c optional netgraph_macfilter
netgraph/ng_mppc.c optional netgraph_mppc_compression | \
netgraph_mppc_encryption
netgraph/ng_nat.c optional netgraph_nat inet libalias
netgraph/ng_one2many.c optional netgraph_one2many
netgraph/ng_parse.c optional netgraph
netgraph/ng_patch.c optional netgraph_patch
netgraph/ng_pipe.c optional netgraph_pipe
netgraph/ng_ppp.c optional netgraph_ppp
netgraph/ng_pppoe.c optional netgraph_pppoe
netgraph/ng_pptpgre.c optional netgraph_pptpgre
netgraph/ng_pred1.c optional netgraph_pred1
netgraph/ng_rfc1490.c optional netgraph_rfc1490
netgraph/ng_socket.c optional netgraph_socket
netgraph/ng_split.c optional netgraph_split
netgraph/ng_tag.c optional netgraph_tag
netgraph/ng_tcpmss.c optional netgraph_tcpmss
netgraph/ng_tee.c optional netgraph_tee
netgraph/ng_tty.c optional netgraph_tty
netgraph/ng_vjc.c optional netgraph_vjc
netgraph/ng_vlan.c optional netgraph_vlan
netgraph/ng_vlan_rotate.c optional netgraph_vlan_rotate
netinet/accf_data.c optional accept_filter_data inet
netinet/accf_dns.c optional accept_filter_dns inet
netinet/accf_http.c optional accept_filter_http inet
netinet/if_ether.c optional inet ether
netinet/igmp.c optional inet
netinet/in.c optional inet
netinet/in_cksum.c optional inet | inet6
netinet/in_debug.c optional inet ddb
netinet/in_kdtrace.c optional inet | inet6
netinet/ip_carp.c optional inet carp | inet6 carp
netinet/in_fib.c optional inet
netinet/in_fib_algo.c optional inet fib_algo
netinet/in_gif.c optional gif inet | netgraph_gif inet
netinet/ip_gre.c optional gre inet
netinet/ip_id.c optional inet
netinet/in_jail.c optional inet
netinet/in_mcast.c optional inet
netinet/in_pcb.c optional inet | inet6
netinet/in_prot.c optional inet | inet6
netinet/in_proto.c optional inet | inet6
netinet/in_rmx.c optional inet
netinet/in_rss.c optional inet rss
netinet/ip_divert.c optional ipdivert inet | ipdivert inet6
netinet/ip_ecn.c optional inet | inet6
netinet/ip_encap.c optional inet | inet6
netinet/ip_fastfwd.c optional inet
netinet/ip_icmp.c optional inet | inet6
netinet/ip_input.c optional inet
netinet/ip_mroute.c optional mrouting inet
netinet/ip_options.c optional inet
netinet/ip_output.c optional inet
netinet/ip_reass.c optional inet
netinet/raw_ip.c optional inet | inet6
netinet/cc/cc.c optional cc_newreno inet | cc_vegas inet | \
cc_htcp inet | cc_hd inet | cc_dctcp inet | cc_cubic inet | \
cc_chd inet | cc_cdg inet | cc_newreno inet6 | cc_vegas inet6 | \
cc_htcp inet6 | cc_hd inet6 | cc_dctcp inet6 | cc_cubic inet6 | \
cc_chd inet6 | cc_cdg inet6
netinet/cc/cc_cdg.c optional inet cc_cdg tcp_hhook
netinet/cc/cc_chd.c optional inet cc_chd tcp_hhook
netinet/cc/cc_cubic.c optional inet cc_cubic | inet6 cc_cubic
netinet/cc/cc_dctcp.c optional inet cc_dctcp | inet6 cc_dctcp
netinet/cc/cc_hd.c optional inet cc_hd tcp_hhook
netinet/cc/cc_htcp.c optional inet cc_htcp | inet6 cc_htcp
netinet/cc/cc_newreno.c optional inet cc_newreno | inet6 cc_newreno
netinet/cc/cc_vegas.c optional inet cc_vegas tcp_hhook
netinet/khelp/h_ertt.c optional inet tcp_hhook
netinet/sctp_asconf.c optional inet sctp | inet6 sctp
netinet/sctp_auth.c optional inet sctp | inet6 sctp
netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp
netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp
netinet/sctp_crc32.c optional inet | inet6
netinet/sctp_indata.c optional inet sctp | inet6 sctp
netinet/sctp_input.c optional inet sctp | inet6 sctp
netinet/sctp_kdtrace.c optional inet sctp | inet6 sctp
netinet/sctp_output.c optional inet sctp | inet6 sctp
netinet/sctp_pcb.c optional inet sctp | inet6 sctp
netinet/sctp_peeloff.c optional inet sctp | inet6 sctp
netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp
netinet/sctp_syscalls.c optional inet sctp | inet6 sctp
netinet/sctp_sysctl.c optional inet sctp | inet6 sctp
netinet/sctp_timer.c optional inet sctp | inet6 sctp
netinet/sctp_usrreq.c optional inet sctp | inet6 sctp
netinet/sctputil.c optional inet sctp | inet6 sctp
netinet/siftr.c optional inet siftr alq | inet6 siftr alq
netinet/tcp_debug.c optional tcpdebug
netinet/tcp_ecn.c optional inet | inet6
netinet/tcp_fastopen.c optional inet tcp_rfc7413 | inet6 tcp_rfc7413
netinet/tcp_hostcache.c optional inet | inet6
netinet/tcp_input.c optional inet | inet6
netinet/tcp_log_buf.c optional tcp_blackbox inet | tcp_blackbox inet6
netinet/tcp_lro.c optional inet | inet6
netinet/tcp_output.c optional inet | inet6
netinet/tcp_offload.c optional tcp_offload inet | tcp_offload inet6
netinet/tcp_hpts.c optional tcphpts inet | tcphpts inet6
netinet/tcp_ratelimit.c optional ratelimit inet | ratelimit inet6
netinet/tcp_pcap.c optional inet tcppcap | inet6 tcppcap \
compile-with "${NORMAL_C} ${NO_WNONNULL}"
netinet/tcp_reass.c optional inet | inet6
netinet/tcp_sack.c optional inet | inet6
netinet/tcp_stats.c optional stats inet | stats inet6
netinet/tcp_subr.c optional inet | inet6
netinet/tcp_syncache.c optional inet | inet6
netinet/tcp_timer.c optional inet | inet6
netinet/tcp_timewait.c optional inet | inet6
netinet/tcp_usrreq.c optional inet | inet6
netinet/udp_usrreq.c optional inet | inet6
netinet/libalias/alias.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_mod.c optional libalias | netgraph_nat
netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet
netinet/netdump/netdump_client.c optional inet debugnet netdump
netinet6/dest6.c optional inet6
netinet6/frag6.c optional inet6
netinet6/icmp6.c optional inet6
netinet6/in6.c optional inet6
netinet6/in6_cksum.c optional inet6
netinet6/in6_fib.c optional inet6
netinet6/in6_fib_algo.c optional inet6 fib_algo
netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6
netinet6/in6_ifattach.c optional inet6
netinet6/in6_jail.c optional inet6
netinet6/in6_mcast.c optional inet6
netinet6/in6_pcb.c optional inet6
netinet6/in6_proto.c optional inet6
netinet6/in6_rmx.c optional inet6
netinet6/in6_rss.c optional inet6 rss
netinet6/in6_src.c optional inet6
netinet6/ip6_fastfwd.c optional inet6
netinet6/ip6_forward.c optional inet6
netinet6/ip6_gre.c optional gre inet6
netinet6/ip6_id.c optional inet6
netinet6/ip6_input.c optional inet6
netinet6/ip6_mroute.c optional mrouting inet6
netinet6/ip6_output.c optional inet6
netinet6/mld6.c optional inet6
netinet6/nd6.c optional inet6
netinet6/nd6_nbr.c optional inet6
netinet6/nd6_rtr.c optional inet6
netinet6/raw_ip6.c optional inet6
netinet6/route6.c optional inet6
netinet6/scope6.c optional inet6
netinet6/sctp6_usrreq.c optional inet6 sctp
netinet6/udp6_usrreq.c optional inet6
netipsec/ipsec.c optional ipsec inet | ipsec inet6
netipsec/ipsec_input.c optional ipsec inet | ipsec inet6
netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6
netipsec/ipsec_mod.c optional ipsec inet | ipsec inet6
netipsec/ipsec_output.c optional ipsec inet | ipsec inet6
netipsec/ipsec_pcb.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/key.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/key_debug.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/keysock.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/subr_ipsec.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/udpencap.c optional ipsec inet
netipsec/xform_ah.c optional ipsec inet | ipsec inet6
netipsec/xform_esp.c optional ipsec inet | ipsec inet6
netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6
netipsec/xform_tcp.c optional ipsec inet tcp_signature | \
ipsec inet6 tcp_signature | ipsec_support inet tcp_signature | \
ipsec_support inet6 tcp_signature
netpfil/ipfw/dn_aqm_codel.c optional inet dummynet
netpfil/ipfw/dn_aqm_pie.c optional inet dummynet
netpfil/ipfw/dn_heap.c optional inet dummynet
netpfil/ipfw/dn_sched_fifo.c optional inet dummynet
netpfil/ipfw/dn_sched_fq_codel.c optional inet dummynet
netpfil/ipfw/dn_sched_fq_pie.c optional inet dummynet
netpfil/ipfw/dn_sched_prio.c optional inet dummynet
netpfil/ipfw/dn_sched_qfq.c optional inet dummynet
netpfil/ipfw/dn_sched_rr.c optional inet dummynet
netpfil/ipfw/dn_sched_wf2q.c optional inet dummynet
netpfil/ipfw/ip_dummynet.c optional inet dummynet
netpfil/ipfw/ip_dn_io.c optional inet dummynet
netpfil/ipfw/ip_dn_glue.c optional inet dummynet
netpfil/ipfw/ip_fw2.c optional inet ipfirewall
netpfil/ipfw/ip_fw_bpf.c optional inet ipfirewall
netpfil/ipfw/ip_fw_dynamic.c optional inet ipfirewall \
compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/ip_fw_eaction.c optional inet ipfirewall
netpfil/ipfw/ip_fw_log.c optional inet ipfirewall
netpfil/ipfw/ip_fw_pfil.c optional inet ipfirewall
netpfil/ipfw/ip_fw_sockopt.c optional inet ipfirewall
netpfil/ipfw/ip_fw_table.c optional inet ipfirewall
netpfil/ipfw/ip_fw_table_algo.c optional inet ipfirewall
netpfil/ipfw/ip_fw_table_value.c optional inet ipfirewall
netpfil/ipfw/ip_fw_iface.c optional inet ipfirewall
netpfil/ipfw/ip_fw_nat.c optional inet ipfirewall_nat
netpfil/ipfw/nat64/ip_fw_nat64.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64clat.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64clat_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \
ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/nat64/nat64stl.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64_translate.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nptv6/ip_fw_nptv6.c optional inet inet6 ipfirewall \
ipfirewall_nptv6
netpfil/ipfw/nptv6/nptv6.c optional inet inet6 ipfirewall \
ipfirewall_nptv6
netpfil/ipfw/pmod/ip_fw_pmod.c optional inet ipfirewall_pmod
netpfil/ipfw/pmod/tcpmod.c optional inet ipfirewall_pmod
netpfil/pf/if_pflog.c optional pflog pf inet
netpfil/pf/if_pfsync.c optional pfsync pf inet
netpfil/pf/pf.c optional pf inet
netpfil/pf/pf_if.c optional pf inet
netpfil/pf/pf_ioctl.c optional pf inet
netpfil/pf/pf_lb.c optional pf inet
netpfil/pf/pf_norm.c optional pf inet
netpfil/pf/pf_nv.c optional pf inet
netpfil/pf/pf_osfp.c optional pf inet
netpfil/pf/pf_ruleset.c optional pf inet
netpfil/pf/pf_syncookies.c optional pf inet
netpfil/pf/pf_table.c optional pf inet
netpfil/pf/in4_cksum.c optional pf inet
netsmb/smb_conn.c optional netsmb
netsmb/smb_crypt.c optional netsmb
netsmb/smb_dev.c optional netsmb
netsmb/smb_iod.c optional netsmb
netsmb/smb_rq.c optional netsmb
netsmb/smb_smb.c optional netsmb
netsmb/smb_subr.c optional netsmb
netsmb/smb_trantcp.c optional netsmb
netsmb/smb_usr.c optional netsmb
nfs/bootp_subr.c optional bootp nfscl
nfs/krpc_subr.c optional bootp nfscl
nfs/nfs_diskless.c optional nfscl nfs_root
nfs/nfs_nfssvc.c optional nfscl | nfslockd | nfsd
nlm/nlm_advlock.c optional nfslockd | nfsd
nlm/nlm_prot_clnt.c optional nfslockd | nfsd
nlm/nlm_prot_impl.c optional nfslockd | nfsd
nlm/nlm_prot_server.c optional nfslockd | nfsd
nlm/nlm_prot_svc.c optional nfslockd | nfsd
nlm/nlm_prot_xdr.c optional nfslockd | nfsd
nlm/sm_inter_xdr.c optional nfslockd | nfsd
# Linux Kernel Programming Interface
compat/linuxkpi/common/src/linux_80211.c optional compat_linuxkpi wlan \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_80211_macops.c optional compat_linuxkpi wlan \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_kmod.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_acpi.c optional compat_linuxkpi acpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_compat.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_current.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_devres.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_dmi.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_domain.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_firmware.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_fpu.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_hrtimer.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_i2c.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_i2cbb.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_interrupt.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_kthread.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_lock.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_netdev.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_page.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_pci.c optional compat_linuxkpi pci \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_tasklet.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_idr.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_radix.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C} -I$S/contrib/ck/include"
compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_shmemfs.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_shrinker.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_skbuff.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_slab.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_work.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_xarray.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/lkpi_iic_if.m optional compat_linuxkpi
compat/linuxkpi/common/src/linux_seq_file.c optional compat_linuxkpi | lindebugfs \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_simple_attr.c optional compat_linuxkpi | lindebugfs \
compile-with "${LINUXKPI_C}"
compat/lindebugfs/lindebugfs.c optional lindebugfs \
compile-with "${LINUXKPI_C}"
# OpenFabrics Enterprise Distribution (Infiniband)
net/if_infiniband.c optional ofed | lagg
ofed/drivers/infiniband/core/ib_addr.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_agent.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cache.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cm.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cma.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_core_uverbs.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cq.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_device.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_fmr_pool.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_iwcm.c optional ofed \
compile-with "${OFED_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
ofed/drivers/infiniband/core/ib_iwpm_msg.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_iwpm_util.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_mad.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_mad_rmpp.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_multicast.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_packer.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_rdma_core.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_sa_query.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_smi.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_sysfs.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_ucm.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_ucma.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_ud_header.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_umem.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_user_mad.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_cmd.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_ioctl.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_main.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_marshall.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_async_fd.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_counters.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_cq.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_device.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_dm.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_flow_action.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_mr.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_uapi.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_verbs.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
#ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \
# compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
#ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \
# compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}"
ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}"
dev/irdma/icrdma.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_cm.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_ctrl.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_hmc.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_hw.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/icrdma_hw.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/fbsd_kcompat.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_kcompat.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_pble.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_puda.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_uda.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_uk.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_utils.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_verbs.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_ws.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/mthca/mthca_allocator.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_av.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_catas.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_cmd.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_cq.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_eq.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_mad.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_main.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_mcg.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_memfree.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_mr.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_pd.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_profile.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_provider.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_qp.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_reset.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_srq.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_uar.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_mcg.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_cm.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_ah.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_cq.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_doorbell.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_mad.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_main.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_mr.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_qp.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_srq.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_wc.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_alloc.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_catas.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_cmd.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_cq.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_fw.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_fw_qos.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_icm.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_intf.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_main.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_mcg.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_mr.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_pd.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_port.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_profile.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_qp.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_reset.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_sense.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_resource_tracker.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_cq.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_main.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_netdev.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_port.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_resources.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_rx.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_tx.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_ah.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_cong.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_cq.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_devx.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_gsi.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mad.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_main.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mem.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mr.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_qp.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_srq.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_virt.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_eswitch.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tcp.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fwdump.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_health.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mad.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_main.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mcg.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mpfs.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mr.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_pagealloc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_pd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_port.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_qp.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_rl.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_srq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_tls.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_transobj.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_vsc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_lib/mlx5_gid.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_dim.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_ethtool.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_main.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_tx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_flow_table.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_hw_tls.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_iq.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_rx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_rl.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_txrx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_port_buffer.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
# crypto support
opencrypto/cbc_mac.c optional crypto
opencrypto/criov.c optional crypto
opencrypto/crypto.c optional crypto
opencrypto/cryptodev.c optional cryptodev
opencrypto/cryptodev_if.m optional crypto
opencrypto/cryptosoft.c optional crypto
opencrypto/cryptodeflate.c optional crypto
opencrypto/gmac.c optional crypto
opencrypto/gfmult.c optional crypto
opencrypto/ktls_ocf.c optional kern_tls
opencrypto/rmd160.c optional crypto
opencrypto/xform_aes_cbc.c optional crypto
opencrypto/xform_aes_icm.c optional crypto
opencrypto/xform_aes_xts.c optional crypto
opencrypto/xform_cbc_mac.c optional crypto
opencrypto/xform_chacha20_poly1305.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
opencrypto/xform_cml.c optional crypto
opencrypto/xform_deflate.c optional crypto
opencrypto/xform_gmac.c optional crypto
opencrypto/xform_null.c optional crypto
opencrypto/xform_poly1305.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
opencrypto/xform_rmd160.c optional crypto
opencrypto/xform_sha1.c optional crypto
opencrypto/xform_sha2.c optional crypto
contrib/libsodium/src/libsodium/crypto_core/ed25519/ref10/ed25519_ref10.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium -Wno-unused-function"
contrib/libsodium/src/libsodium/crypto_core/hchacha20/core_hchacha20.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/onetimeauth_poly1305.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/donna/poly1305_donna.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_scalarmult/curve25519/scalarmult_curve25519.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_scalarmult/curve25519/ref10/x25519_ref10.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium -Wno-unused-function"
contrib/libsodium/src/libsodium/crypto_stream/chacha20/stream_chacha20.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_stream/chacha20/ref/chacha20_ref.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_verify/sodium/verify.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
crypto/libsodium/randombytes.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
crypto/libsodium/utils.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd
rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd
rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_bck.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_dg.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_rc.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_vc.c optional krpc | nfslockd | nfscl | nfsd
rpc/getnetconfig.c optional krpc | nfslockd | nfscl | nfsd
rpc/replay.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpc_callmsg.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpc_generic.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpc_prot.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcb_clnt.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcb_prot.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_auth.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_auth_unix.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_dg.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_generic.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_vc.c optional krpc | nfslockd | nfscl | nfsd
#
# Kernel RPC-over-TLS
#
rpctlscd.h optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlscd.x" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/rpc/rpcsec_tls/rpctlscd.x | grep -v pthread.h > rpctlscd.h" \
no-obj no-implicit-rule before-depend local \
clean "rpctlscd.h"
rpctlscd_xdr.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlscd.x rpctlscd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/rpc/rpcsec_tls/rpctlscd.x -o rpctlscd_xdr.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlscd_xdr.c"
rpctlscd_clnt.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlscd.x rpctlscd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/rpc/rpcsec_tls/rpctlscd.x | grep -v string.h > rpctlscd_clnt.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlscd_clnt.c"
rpctlssd.h optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlssd.x" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/rpc/rpcsec_tls/rpctlssd.x | grep -v pthread.h > rpctlssd.h" \
no-obj no-implicit-rule before-depend local \
clean "rpctlssd.h"
rpctlssd_xdr.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlssd.x rpctlssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/rpc/rpcsec_tls/rpctlssd.x -o rpctlssd_xdr.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlssd_xdr.c"
rpctlssd_clnt.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlssd.x rpctlssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/rpc/rpcsec_tls/rpctlssd.x | grep -v string.h > rpctlssd_clnt.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlssd_clnt.c"
rpc/rpcsec_tls/rpctls_impl.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcsec_tls/auth_tls.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
security/audit/audit.c optional audit
security/audit/audit_arg.c optional audit
security/audit/audit_bsm.c optional audit
security/audit/audit_bsm_db.c optional audit
security/audit/audit_bsm_klib.c optional audit
security/audit/audit_dtrace.c optional dtaudit audit | dtraceall audit compile-with "${CDDL_C}"
security/audit/audit_pipe.c optional audit
security/audit/audit_syscalls.c standard
security/audit/audit_trigger.c optional audit
security/audit/audit_worker.c optional audit
security/audit/bsm_domain.c optional audit
security/audit/bsm_errno.c optional audit
security/audit/bsm_fcntl.c optional audit
security/audit/bsm_socket_type.c optional audit
security/audit/bsm_token.c optional audit
security/mac/mac_audit.c optional mac audit
security/mac/mac_cred.c optional mac
security/mac/mac_kdb.c optional mac
security/mac/mac_framework.c optional mac
security/mac/mac_inet.c optional mac inet | mac inet6
security/mac/mac_inet6.c optional mac inet6
security/mac/mac_label.c optional mac
security/mac/mac_net.c optional mac
security/mac/mac_pipe.c optional mac
security/mac/mac_posix_sem.c optional mac
security/mac/mac_posix_shm.c optional mac
security/mac/mac_priv.c optional mac
security/mac/mac_process.c optional mac
security/mac/mac_socket.c optional mac
security/mac/mac_syscalls.c standard
security/mac/mac_system.c optional mac
security/mac/mac_sysv_msg.c optional mac
security/mac/mac_sysv_sem.c optional mac
security/mac/mac_sysv_shm.c optional mac
security/mac/mac_vfs.c optional mac
security/mac_biba/mac_biba.c optional mac_biba
security/mac_ddb/mac_ddb.c optional mac_ddb
security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended
security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended
security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended
security/mac_ifoff/mac_ifoff.c optional mac_ifoff
security/mac_lomac/mac_lomac.c optional mac_lomac
security/mac_mls/mac_mls.c optional mac_mls
security/mac_none/mac_none.c optional mac_none
security/mac_ntpd/mac_ntpd.c optional mac_ntpd
security/mac_partition/mac_partition.c optional mac_partition
security/mac_portacl/mac_portacl.c optional mac_portacl
security/mac_priority/mac_priority.c optional mac_priority
security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids
security/mac_stub/mac_stub.c optional mac_stub
security/mac_test/mac_test.c optional mac_test
security/mac_veriexec/mac_veriexec.c optional mac_veriexec
security/mac_veriexec/veriexec_fingerprint.c optional mac_veriexec
security/mac_veriexec/veriexec_metadata.c optional mac_veriexec
security/mac_veriexec_parser/mac_veriexec_parser.c optional mac_veriexec mac_veriexec_parser
security/mac_veriexec/mac_veriexec_rmd160.c optional mac_veriexec_rmd160
security/mac_veriexec/mac_veriexec_sha1.c optional mac_veriexec_sha1
security/mac_veriexec/mac_veriexec_sha256.c optional mac_veriexec_sha256
security/mac_veriexec/mac_veriexec_sha384.c optional mac_veriexec_sha384
security/mac_veriexec/mac_veriexec_sha512.c optional mac_veriexec_sha512
teken/teken.c optional sc !SC_NO_TERM_TEKEN | vt
ufs/ffs/ffs_alloc.c optional ffs
ufs/ffs/ffs_balloc.c optional ffs
ufs/ffs/ffs_inode.c optional ffs
ufs/ffs/ffs_snapshot.c optional ffs
ufs/ffs/ffs_softdep.c optional ffs
ufs/ffs/ffs_subr.c optional ffs | geom_label
ufs/ffs/ffs_tables.c optional ffs | geom_label
ufs/ffs/ffs_vfsops.c optional ffs
ufs/ffs/ffs_vnops.c optional ffs
ufs/ffs/ffs_rawread.c optional ffs directio
ufs/ffs/ffs_suspend.c optional ffs
ufs/ufs/ufs_acl.c optional ffs
ufs/ufs/ufs_bmap.c optional ffs
ufs/ufs/ufs_dirhash.c optional ffs
ufs/ufs/ufs_extattr.c optional ffs
ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL
ufs/ufs/ufs_inode.c optional ffs
ufs/ufs/ufs_lookup.c optional ffs
ufs/ufs/ufs_quota.c optional ffs
ufs/ufs/ufs_vfsops.c optional ffs
ufs/ufs/ufs_vnops.c optional ffs
vm/device_pager.c standard
vm/phys_pager.c standard
vm/redzone.c optional DEBUG_REDZONE
vm/sg_pager.c standard
vm/swap_pager.c standard
vm/uma_core.c standard
vm/uma_dbg.c standard
vm/memguard.c optional DEBUG_MEMGUARD
vm/vm_domainset.c standard
vm/vm_fault.c standard
vm/vm_glue.c standard
vm/vm_init.c standard
vm/vm_kern.c standard
vm/vm_map.c standard
vm/vm_meter.c standard
vm/vm_mmap.c standard
vm/vm_object.c standard
vm/vm_page.c standard
vm/vm_pageout.c standard
vm/vm_pager.c standard
vm/vm_phys.c standard
vm/vm_radix.c standard
vm/vm_reserv.c standard
vm/vm_swapout.c optional !NO_SWAPPING
vm/vm_swapout_dummy.c optional NO_SWAPPING
vm/vm_unix.c standard
vm/vnode_pager.c standard
xen/features.c optional xenhvm
xen/xenbus/xenbus_if.m optional xenhvm
xen/xenbus/xenbus.c optional xenhvm
xen/xenbus/xenbusb_if.m optional xenhvm
xen/xenbus/xenbusb.c optional xenhvm
xen/xenbus/xenbusb_front.c optional xenhvm
xen/xenbus/xenbusb_back.c optional xenhvm
xen/xenmem/xenmem_if.m optional xenhvm
xdr/xdr.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_array.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_mbuf.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_mem.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_reference.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_sizeof.c optional xdr | krpc | nfslockd | nfscl | nfsd
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml b/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml
index 328cb97f10e4..0273610af045 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml
@@ -1,82 +1,82 @@
name: zfs-tests-functional
on:
push:
pull_request:
jobs:
tests-functional-ubuntu:
strategy:
fail-fast: false
matrix:
os: [18.04, 20.04]
runs-on: ubuntu-${{ matrix.os }}
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Reclaim disk space
run: |
${{ github.workspace }}/.github/workflows/scripts/reclaim_disk_space.sh
- name: Install dependencies
run: |
sudo apt-get update
xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt sudo apt-get install -qq
sudo apt-get clean
- name: Autogen.sh
run: |
./autogen.sh
- name: Configure
run: |
- ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan --with-config=dist
+ ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
- name: Make
run: |
make -j$(nproc) --no-print-directory --silent pkg-utils pkg-kmod
- name: Install
run: |
sudo dpkg -i *.deb
# Update order of directories to search for modules, otherwise
# Ubuntu will load kernel-shipped ones.
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo depmod
sudo modprobe zfs
# Workaround for cloud-init bug
# see https://github.com/openzfs/zfs/issues/12644
FILE=/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
if [ -r "${FILE}" ]; then
HASH=$(md5sum "${FILE}" | awk '{ print $1 }')
if [ "${HASH}" = "121ff0ef1936cd2ef65aec0458a35772" ]; then
# Just shove a zd* exclusion right above the hotplug hook...
sudo sed -i -e s/'LABEL="cloudinit_hook"'/'KERNEL=="zd*", GOTO="cloudinit_end"\n&'/ "${FILE}"
sudo udevadm control --reload-rules
fi
fi
- name: Clear the kernel ring buffer
run: |
sudo dmesg -c >/var/tmp/dmesg-prerun
- name: Report disk space
run: |
df -h /
- name: Tests
run: |
set -o pipefail
/usr/share/zfs/zfs-tests.sh -vKR -s 3G | scripts/zfs-tests-color.sh
shell: bash
timeout-minutes: 330
- name: Prepare artifacts
if: failure()
run: |
RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
sudo dmesg > $RESULTS_PATH/dmesg
sudo cp /var/log/syslog /var/tmp/dmesg-prerun $RESULTS_PATH/
sudo chmod +r $RESULTS_PATH/*
# Replace ':' in dir names, actions/upload-artifact doesn't support it
for f in $(find /var/tmp/test_results -name '*:*'); do mv "$f" "${f//:/__}"; done
- uses: actions/upload-artifact@v2
if: failure()
with:
name: Test logs Ubuntu-${{ matrix.os }}
path: |
/var/tmp/test_results/*
!/var/tmp/test_results/current
if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml b/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml
index 4c15cecf58d8..73606f909e10 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml
@@ -1,78 +1,78 @@
name: zfs-tests-sanity
on:
push:
pull_request:
jobs:
tests:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Reclaim disk space
run: |
${{ github.workspace }}/.github/workflows/scripts/reclaim_disk_space.sh
- name: Install dependencies
run: |
sudo apt-get update
xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt sudo apt-get install -qq
sudo apt-get clean
- name: Autogen.sh
run: |
./autogen.sh
- name: Configure
run: |
- ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan --with-config=dist
+ ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
- name: Make
run: |
make -j$(nproc) --no-print-directory --silent pkg-utils pkg-kmod
- name: Install
run: |
sudo dpkg -i *.deb
# Update order of directories to search for modules, otherwise
# Ubuntu will load kernel-shipped ones.
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo depmod
sudo modprobe zfs
# Workaround for cloud-init bug
# see https://github.com/openzfs/zfs/issues/12644
FILE=/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
if [ -r "${FILE}" ]; then
HASH=$(md5sum "${FILE}" | awk '{ print $1 }')
if [ "${HASH}" = "121ff0ef1936cd2ef65aec0458a35772" ]; then
# Just shove a zd* exclusion right above the hotplug hook...
sudo sed -i -e s/'LABEL="cloudinit_hook"'/'KERNEL=="zd*", GOTO="cloudinit_end"\n&'/ "${FILE}"
sudo udevadm control --reload-rules
fi
fi
- name: Clear the kernel ring buffer
run: |
sudo dmesg -c >/var/tmp/dmesg-prerun
- name: Report disk space
run: |
df -h /
- name: Tests
run: |
set -o pipefail
/usr/share/zfs/zfs-tests.sh -vKR -s 3G -r sanity | scripts/zfs-tests-color.sh
shell: bash
timeout-minutes: 330
- name: Prepare artifacts
if: failure()
run: |
RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
sudo dmesg > $RESULTS_PATH/dmesg
sudo cp /var/log/syslog /var/tmp/dmesg-prerun $RESULTS_PATH/
sudo chmod +r $RESULTS_PATH/*
# Replace ':' in dir names, actions/upload-artifact doesn't support it
for f in $(find /var/tmp/test_results -name '*:*'); do mv "$f" "${f//:/__}"; done
- uses: actions/upload-artifact@v2
if: failure()
with:
name: Test logs Ubuntu-${{ matrix.os }}
path: |
/var/tmp/test_results/*
!/var/tmp/test_results/current
if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/.github/workflows/zloop.yml b/sys/contrib/openzfs/.github/workflows/zloop.yml
index 64fe96a3ab6b..d49eeae1653c 100644
--- a/sys/contrib/openzfs/.github/workflows/zloop.yml
+++ b/sys/contrib/openzfs/.github/workflows/zloop.yml
@@ -1,62 +1,62 @@
name: zloop
on:
push:
pull_request:
jobs:
tests:
runs-on: ubuntu-20.04
env:
TEST_DIR: /var/tmp/zloop
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install dependencies
run: |
sudo apt-get update
xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt sudo apt-get install -qq
sudo apt-get clean
- name: Autogen.sh
run: |
./autogen.sh
- name: Configure
run: |
- ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan --with-config=dist
+ ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
- name: Make
run: |
make -j$(nproc) --no-print-directory --silent pkg-utils pkg-kmod
- name: Install
run: |
sudo dpkg -i *.deb
# Update order of directories to search for modules, otherwise
# Ubuntu will load kernel-shipped ones.
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo depmod
sudo modprobe zfs
- name: Tests
run: |
sudo mkdir -p $TEST_DIR
# run for 10 minutes or at most 2 iterations for a maximum runner
# time of 20 minutes.
sudo /usr/share/zfs/zloop.sh -t 600 -I 2 -l -m1 -- -T 120 -P 60
- name: Prepare artifacts
if: failure()
run: |
sudo chmod +r -R $TEST_DIR/
- uses: actions/upload-artifact@v2
if: failure()
with:
name: Logs
path: |
/var/tmp/zloop/*/
!/var/tmp/zloop/*/vdev/
if-no-files-found: ignore
- uses: actions/upload-artifact@v2
if: failure()
with:
name: Pool files
path: |
/var/tmp/zloop/*/vdev/
if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/cmd/Makefile.am b/sys/contrib/openzfs/cmd/Makefile.am
index 65de980da308..6d6de4adb42a 100644
--- a/sys/contrib/openzfs/cmd/Makefile.am
+++ b/sys/contrib/openzfs/cmd/Makefile.am
@@ -1,115 +1,116 @@
bin_SCRIPTS =
bin_PROGRAMS =
sbin_SCRIPTS =
sbin_PROGRAMS =
dist_bin_SCRIPTS =
zfsexec_PROGRAMS =
mounthelper_PROGRAMS =
sbin_SCRIPTS += fsck.zfs
SHELLCHECKSCRIPTS += fsck.zfs
CLEANFILES += fsck.zfs
dist_noinst_DATA += %D%/fsck.zfs.in
$(call SUBST,fsck.zfs,%D%/)
sbin_PROGRAMS += zfs_ids_to_path
CPPCHECKTARGETS += zfs_ids_to_path
zfs_ids_to_path_SOURCES = \
%D%/zfs_ids_to_path.c
zfs_ids_to_path_LDADD = \
libzfs.la
zhack_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
sbin_PROGRAMS += zhack
CPPCHECKTARGETS += zhack
zhack_SOURCES = \
%D%/zhack.c
zhack_LDADD = \
libzpool.la \
libzfs_core.la \
libnvpair.la
ztest_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
# Get rid of compiler warning for unchecked truncating snprintfs on gcc 7.1.1
ztest_CFLAGS += $(NO_FORMAT_TRUNCATION)
ztest_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
sbin_PROGRAMS += ztest
CPPCHECKTARGETS += ztest
ztest_SOURCES = \
%D%/ztest.c
ztest_LDADD = \
libzpool.la \
libzfs_core.la \
libnvpair.la
ztest_LDADD += -lm
ztest_LDFLAGS = -pthread
include $(srcdir)/%D%/raidz_test/Makefile.am
include $(srcdir)/%D%/zdb/Makefile.am
include $(srcdir)/%D%/zfs/Makefile.am
include $(srcdir)/%D%/zinject/Makefile.am
include $(srcdir)/%D%/zpool/Makefile.am
include $(srcdir)/%D%/zpool_influxdb/Makefile.am
include $(srcdir)/%D%/zstream/Makefile.am
if BUILD_LINUX
mounthelper_PROGRAMS += mount.zfs
CPPCHECKTARGETS += mount.zfs
mount_zfs_SOURCES = \
%D%/mount_zfs.c
mount_zfs_LDADD = \
libzfs.la \
libzfs_core.la \
libnvpair.la
mount_zfs_LDADD += $(LTLIBINTL)
CPPCHECKTARGETS += raidz_test
sbin_PROGRAMS += zgenhostid
CPPCHECKTARGETS += zgenhostid
zgenhostid_SOURCES = \
%D%/zgenhostid.c
dist_bin_SCRIPTS += %D%/zvol_wait
SHELLCHECKSCRIPTS += %D%/zvol_wait
include $(srcdir)/%D%/zed/Makefile.am
endif
if USING_PYTHON
-bin_SCRIPTS += arc_summary arcstat dbufstat
-CLEANFILES += arc_summary arcstat dbufstat
-dist_noinst_DATA += %D%/arc_summary %D%/arcstat.in %D%/dbufstat.in
+bin_SCRIPTS += arc_summary arcstat dbufstat zilstat
+CLEANFILES += arc_summary arcstat dbufstat zilstat
+dist_noinst_DATA += %D%/arc_summary %D%/arcstat.in %D%/dbufstat.in %D%/zilstat.in
$(call SUBST,arcstat,%D%/)
$(call SUBST,dbufstat,%D%/)
+$(call SUBST,zilstat,%D%/)
arc_summary: %D%/arc_summary
$(AM_V_at)cp $< $@
endif
PHONY += cmd
cmd: $(bin_SCRIPTS) $(bin_PROGRAMS) $(sbin_SCRIPTS) $(sbin_PROGRAMS) $(dist_bin_SCRIPTS) $(zfsexec_PROGRAMS) $(mounthelper_PROGRAMS)
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index fdf569691cb2..92df3dd167bf 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,9000 +1,9013 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <getopt.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <libnvpair.h>
#include <libzutil.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
DMU_OT_ZAP_OTHER : \
(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
/* Some platforms require part of inode IDs to be remapped */
#ifdef __APPLE__
#define ZDB_MAP_OBJECT_ID(obj) INO_XNUTOZFS(obj, 2)
#else
#define ZDB_MAP_OBJECT_ID(obj) (obj)
#endif
static const char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern unsigned long zfs_arc_meta_min, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern boolean_t spa_mode_readable_spacemaps;
extern int zfs_reconstruct_indirect_combinations_max;
-extern int zfs_btree_verify_intensity;
+extern uint_t zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
typedef struct zopt_object_range {
uint64_t zor_obj_start;
uint64_t zor_obj_end;
uint64_t zor_flags;
} zopt_object_range_t;
zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
#define ZOR_FLAG_PLAIN_FILE 0x0001
#define ZOR_FLAG_DIRECTORY 0x0002
#define ZOR_FLAG_SPACE_MAP 0x0004
#define ZOR_FLAG_ZAP 0x0008
#define ZOR_FLAG_ALL_TYPES -1
#define ZOR_SUPPORTED_FLAGS (ZOR_FLAG_PLAIN_FILE | \
ZOR_FLAG_DIRECTORY | \
ZOR_FLAG_SPACE_MAP | \
ZOR_FLAG_ZAP)
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
typedef struct sublivelist_verify {
/* FREE's that haven't yet matched to an ALLOC, in one sub-livelist */
zfs_btree_t sv_pair;
/* ALLOC's without a matching FREE, accumulates across sub-livelists */
zfs_btree_t sv_leftover;
} sublivelist_verify_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = larg;
const blkptr_t *r = rarg;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev, r_dva0_vdev;
l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev < r_dva0_vdev)
return (-1);
else if (l_dva0_vdev > r_dva0_vdev)
return (+1);
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset;
uint64_t r_dva0_offset;
l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset < r_dva0_offset) {
return (-1);
} else if (l_dva0_offset > r_dva0_offset) {
return (+1);
}
/*
* Since we're storing blkptrs without cancelling FREE/ALLOC pairs,
* it's possible the offsets are equal. In that case, sort by txg
*/
if (l->blk_birth < r->blk_birth) {
return (-1);
} else if (l->blk_birth > r->blk_birth) {
return (+1);
}
return (0);
}
typedef struct sublivelist_verify_block {
dva_t svb_dva;
/*
* We need this to check if the block marked as allocated
* in the livelist was freed (and potentially reallocated)
* in the metaslab spacemaps at a later TXG.
*/
uint64_t svb_allocated_txg;
} sublivelist_verify_block_t;
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* Refcount gets incremented to 1 when we encounter the first
* FREE entry for the svbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (e.g. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg = bp->blk_birth
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
(void) args;
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb;
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for spacemap entry in the livelist entries.
* Then, look for other livelist entries that fall within the range
* of the spacemap entry as it may have been condensed
*/
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from sv_leftover tree to the mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly double FREEs are allowed as well but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
livelist_block_compare,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we walked
* through all the metaslabs in the concrete vdevs then this means
* that we have segments in the livelists that belong to indirect
* vdevs and are marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O <dataset> <path>\n"
"\t%s -r <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files \n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b --block-stats "
"block statistics\n");
(void) fprintf(stderr, " -c --checksum "
"checksum all metadata (twice for all data) blocks\n");
(void) fprintf(stderr, " -C --config "
"config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d --datasets "
"dataset(s)\n");
(void) fprintf(stderr, " -D --dedup-stats "
"dedup statistics\n");
(void) fprintf(stderr, " -E --embedded-block-pointer=INTEGER\n"
" decode and display block "
"from an embedded block pointer\n");
(void) fprintf(stderr, " -h --history "
"pool history\n");
(void) fprintf(stderr, " -i --intent-logs "
"intent logs\n");
(void) fprintf(stderr, " -l --label "
"read label contents\n");
(void) fprintf(stderr, " -k --checkpointed-state "
"examine the checkpointed state of the pool\n");
(void) fprintf(stderr, " -L --disable-leak-tracking "
"disable leak tracking (do not load spacemaps)\n");
(void) fprintf(stderr, " -m --metaslabs "
"metaslabs\n");
(void) fprintf(stderr, " -M --metaslab-groups "
"metaslab groups\n");
(void) fprintf(stderr, " -O --object-lookups "
"perform object lookups by path\n");
(void) fprintf(stderr, " -r --copy-object "
"copy an object by path to file\n");
(void) fprintf(stderr, " -R --read-block "
"read and display block from a device\n");
(void) fprintf(stderr, " -s --io-stats "
"report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S --simulate-dedup "
"simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v --verbose "
"verbose (applies to all others)\n");
(void) fprintf(stderr, " -y --livelist "
"perform livelist and metaslab validation on any livelists being "
"deleted\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A --ignore-assertions "
"ignore assertions (-A), enable panic recovery (-AA) or both "
"(-AAA)\n");
(void) fprintf(stderr, " -e --exported "
"pool is exported/destroyed/has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F --automatic-rewind "
"attempt automatic rewind within safe range of transaction "
"groups\n");
(void) fprintf(stderr, " -G --dump-debug-msg "
"dump zfs_dbgmsg buffer before exiting\n");
(void) fprintf(stderr, " -I --inflight=INTEGER "
"specify the maximum number of checksumming I/Os "
"[default is 200]\n");
(void) fprintf(stderr, " -o --option=\"OPTION=INTEGER\" "
"set global variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p --path==PATH "
"use one or more with -e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P --parseable "
"print numbers in parseable form\n");
(void) fprintf(stderr, " -q --skip-label "
"don't print label contents\n");
(void) fprintf(stderr, " -t --txg=INTEGER "
"highest txg to use when searching for uberblocks\n");
(void) fprintf(stderr, " -u --uberblock "
"uberblock\n");
(void) fprintf(stderr, " -U --cachefile=PATH "
"use alternate cachefile\n");
(void) fprintf(stderr, " -V --verbatim "
"do verbatim import\n");
(void) fprintf(stderr, " -x --dump-blocks=PATH "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X --extreme-rewind "
"attempt extreme rewind (does not work with dataset)\n");
(void) fprintf(stderr, " -Y --all-reconstruction "
"attempt all reconstruction combinations for split blocks\n");
(void) fprintf(stderr, " -Z --zstd-headers "
"show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
(void) fflush(stdout);
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) size;
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) size;
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
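/*
* Render a histogram as rows of stars: every bucket between the first and
* last non-empty ones gets a row scaled against the largest bucket, so the
* widest row is at most histo_width characters.
*/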
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] > max)
max = histo[i];
if (histo[i] > 0 && i > maxidx)
maxidx = i;
if (histo[i] > 0 && i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
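/*
* Print ZAP statistics for an object: a one-line summary for a microzap
* (no pointer table), or the pointer table, entry/leaf/block counts and
* distribution histograms for a fat ZAP.
*/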
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
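/*
* Dump a uint64 array object in hex, four words per line. Only runs at
* verbosity >= 6 (-dddddd); at most 1 MiB is read and printed, and any
* truncation is marked with ", ...".
*/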
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
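/*
* Dump ZAP stats followed by every entry. Single-byte-integer attributes
* are printed as strings, except DSL crypto key material and the pool
* checksum salt, which are hex-dumped; wider integers are printed one
* element at a time.
*/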
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attr.za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
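/*
* The get_*_refcount() helpers below count space maps that are large enough
* to include a histogram, i.e. those that hold a reference on
* SPA_FEATURE_SPACEMAP_HISTOGRAM. verify_spacemap_refcounts() compares
* their sum against the feature's recorded refcount.
*/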
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
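/*
* Print a space map. At sufficient verbosity each entry is decoded: debug
* entries show the action, txg and sync pass; single-word and two-word
* entries show an alloc/free range (offset, run and, for two-word entries,
* the vdev id). The running allocated total is checked against the space
* map's own summary.
*/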
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa, boolean_t show_special)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
metaslab_class_t *smc = spa_special_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || (mg->mg_class != mc &&
(!show_special || mg->mg_class != smc)))
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
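/*
* Summarize dedup effectiveness from a ddt_stat_t:
*   dedup    = referenced dsize / actual dsize
*   compress = referenced lsize / referenced psize
*   copies   = referenced dsize / referenced psize
* dedup * compress / copies is the overall space-saving ratio.
*/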
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
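/*
* Convert a (level, blkid) bookmark into a byte offset within the object:
* the blkid is scaled by the number of data blocks spanned by one block
* pointer at that level and by the data block size. With no dnode
* (negative levels, e.g. ZIL blocks) the bp's lsize is used instead.
*/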
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
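/*
* Append the ZSTD header (compressed size, version, level) of a
* zstd-compressed block to blkbuf. Embedded block pointers are decoded in
* place; otherwise the block is read raw (ZIO_FLAG_RAW_COMPRESS) so the
* header can be examined without decompressing the payload.
*/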
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
abd_t *pabd;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
return;
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " cksum=%llx:%llx:%llx:%llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
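/*
* Print a block pointer and, for interior (level > 0) blocks, read it via
* the ARC and recurse into each child block pointer, verifying that the
* children's fill counts add up to the parent's.
*/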
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (int j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (nice) >= NN_NUMBUF_SZ, "nice truncated");
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
_Static_assert(sizeof (used) >= NN_NUMBUF_SZ, "used truncated");
_Static_assert(sizeof (compressed) >= NN_NUMBUF_SZ,
"compressed truncated");
_Static_assert(sizeof (uncompressed) >= NN_NUMBUF_SZ,
"uncompressed truncated");
_Static_assert(sizeof (unique) >= NN_NUMBUF_SZ, "unique truncated");
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t attr;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
dmu_objset_name(os, osname);
VERIFY3S(0, <=, snprintf(buf, sizeof (buf), "%s#%s", osname,
attr.za_name));
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
static void
dump_blkptr_list(dsl_deadlist_t *dl, const char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
_Static_assert(sizeof (entries) >= NN_NUMBUF_SZ, "entries truncated");
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) putchar('\n');
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
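/*
* Cross-check a clone's livelist against dsl_dataset_space_written() from
* its origin: the used and compressed totals must match exactly, while the
* livelist's uncompressed total may be smaller because embedded block
* pointers are not tracked there. Returns non-zero on a mismatch.
*/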
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
* It's possible that the dataset's uncomp space is larger than the
* livelist's because livelists do not track embedded block pointers
*/
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, const void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
*osp = NULL;
}
}
sa_os = *osp;
return (0);
}
static void
close_objset(objset_t *os, const void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele(dmu_objset_ds(os), tag);
sa_attr_table = NULL;
sa_os = NULL;
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
* Print uid or gid information.
* For a normal POSIX id, just the id is printed in decimal format.
* For CIFS files with a FUID, the fuid is printed in hex followed by
* the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
const char *domain =
zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
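/*
* Unpack the DXATTR system attribute (a packed nvlist of xattrs stored in
* the dnode's SA area) and print each name and value, showing printable
* bytes verbatim and the rest as octal escapes.
*/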
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (isprint(value[idx]))
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_symlink_size >= sizeof (linktarget)) {
(void) printf("symlink size %d is too large\n",
sa_symlink_size);
return;
}
linktarget[sa_symlink_size] = '\0';
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
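/*
* Object viewer for ZFS znodes (and SA bonus objects): bulk-read the common
* system attributes and print the path, ownership, timestamps, size, link
* count, and flags, plus xattr/rdev/projid details when present.
*/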
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
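/*
* Per-object-type dump callbacks, indexed by DMU object type. dump_object()
* dispatches through this table when printing bonus buffers and object
* contents; dump_unknown must remain the final entry.
*/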
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
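/*
* Decide whether an object of the given type should be displayed, based on
* the type flags supplied with the object range (see parse_object_range()).
*/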
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
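/*
* Print a one-line summary of the object (object 0 is the meta-dnode).
* At verbosity >= 4 also dump the dnode flags and the bonus and object
* contents; at verbosity >= 5 add the spill block, indirect blocks, and
* the list of allocated segments.
*/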
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
_Static_assert(sizeof (iblk) >= NN_NUMBUF_SZ, "iblk truncated");
_Static_assert(sizeof (dblk) >= NN_NUMBUF_SZ, "dblk truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ, "lsize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ, "asize truncated");
_Static_assert(sizeof (bonus_size) >= NN_NUMBUF_SZ,
"bonus_size truncated");
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
(void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5) {
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
DN_SPILL_BLKPTR(dn->dn_phys), B_FALSE);
(void) printf("\nSpill block: %s\n", blkbuf);
}
dump_indirect(dn);
}
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (segsize) >= NN_NUMBUF_SZ,
"segsize truncated");
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *const objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
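* For example (see match_object_type()), "128:256:A-f" selects objects
* 128 through 256 of every type except plain files.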
*/
static int
parse_object_range(char *range, zopt_object_range_t *zor, const char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = ZDB_MAP_OBJECT_ID(zor->zor_obj_end);
out:
free(dup);
return (rc);
}
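/*
* Print a summary line for the objset and dump any object ranges requested
* on the command line. Otherwise dump the intent log, deadlists/livelists,
* and bookmarks, and at verbosity >= 2 every allocated object along with
* dnode slot usage statistics.
*/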
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
_Static_assert(sizeof (numbuf) >= NN_NUMBUF_SZ, "numbuf truncated");
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, ctime(&timestamp));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
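/*
* Read a pool cache file, unpack the packed nvlist it contains, and
* print it.
*/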
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
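/*
* Recursively walk a label config nvlist, copying string, uint64, and
* boolean pairs into the per-type nvlists in stats (so their encoded sizes
* can be reported), and record the XDR-encoded size of each entry of a
* "children" array as a leaf vdev size.
*/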
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* stat collection requires nvlists that permit duplicate names (nvflag 0) */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
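/*
* A cksum_record_t identifies a unique label config or uberblock by its
* checksum; labels[] records which of the VDEV_LABELS labels contained that
* copy, so duplicates are only printed once by default.
*/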
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference = 0;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(const char *prefix, const cksum_record_t *rec)
{
fputs(prefix, stdout);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
putchar('\n');
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
uint64_t label_offset;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
boolean_t cksum_valid;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d %s\n", l,
label->cksum_valid ? "" : "(Bad label cksum)");
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(l2arc_log_blkptr_t lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps.lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps.lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps.lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&lbps)->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&lbps)->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&lbps)->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM((&lbps)->lbp_prop));
(void) printf("|\n\n");
}
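/*
* Walk the chain of L2ARC log blocks starting at the device header's
* start_lbps, verifying each block's checksum and decompressing it if
* needed, and accumulate the rebuilt lb_count/lb_asize totals for
* comparison against the on-disk header.
*/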
static void
dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
abd_t *abd;
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
memcpy(lbps, l2dhdr.dh_start_lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr.dh_evict;
dev.l2ad_start = l2dhdr.dh_start;
dev.l2ad_end = l2dhdr.dh_end;
if (l2dhdr.dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr.dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &this_lb,
asize, sizeof (this_lb), NULL);
abd_free(abd);
break;
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr.dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr = {0}, rebuild = {0};
int error = B_FALSE;
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the header of the device may be less than what zdb
* reports by dump_l2arc_log_blocks() which emulates l2arc_rebuild().
* This happens because dump_l2arc_log_blocks() lacks the memory
* pressure valve that l2arc_rebuild() has. Thus, if we are on a system
* with low memory, l2arc_rebuild will exit prematurely and dh_lb_asize
* and dh_lb_count will be lower to begin with than what exists on the
* device. This is normal and zdb should not exit with an error. The
* opposite case should never happen, though: the values reported in the
* header should never be higher than what dump_l2arc_log_blocks() and
* l2arc_rebuild() report. If this happens there is a leak in the
* accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
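/*
* Print the uberblocks found in this label. By default a given uberblock is
* only printed for the first label that contains it (-uuu prints it for
* every label), and the MMP-only slots at the end of the uberblock ring are
* skipped (-uuuu includes them).
*/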
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing
* current one's obj and remaining path until we find the obj
* for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
zfs_fallthrough;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
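/*
* Copy the contents of a file object to destfile, reading via dmu_read()
* in chunks of at most 1 MiB.
*/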
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (fd == -1)
+ return (errno);
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
+ (void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
+ (void) close(fd);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
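/*
* Verify the embedded checksum of a label's vdev_phys region: rebuild the
* verifier from the label's offset, recompute the checksum over the region
* (byteswapping if the label was written with the opposite endianness), and
* compare it with the checksum stored in the trailing zio_eck_t.
*/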
static boolean_t
label_cksum_valid(vdev_label_t *label, uint64_t offset)
{
zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
zio_cksum_t expected_cksum;
zio_cksum_t actual_cksum;
zio_cksum_t verifier;
zio_eck_t *eck;
int byteswap;
void *data = (char *)label + offsetof(vdev_label_t, vl_vdev_phys);
eck = (zio_eck_t *)((char *)(data) + VDEV_PHYS_SIZE) - 1;
offset += offsetof(vdev_label_t, vl_vdev_phys);
ZIO_SET_CHECKSUM(&verifier, offset, 0, 0, 0);
byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck->zec_cksum;
eck->zec_cksum = verifier;
abd_t *abd = abd_get_from_buf(data, VDEV_PHYS_SIZE);
ci->ci_func[byteswap](abd, VDEV_PHYS_SIZE, NULL, &actual_cksum);
abd_free(abd);
if (byteswap)
byteswap_uint64_array(&expected_cksum, sizeof (zio_cksum_t));
if (ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (B_TRUE);
return (B_FALSE);
}
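/*
* Read all VDEV_LABELS labels from the named device, de-duplicating
* identical configs and uberblocks across labels before dumping them, and
* dump the L2ARC device header when the device is a cache device.
* Returns 2 if no config was found, 1 on other errors, and 0 on success.
*/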
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS] = {{{{0}}}};
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
/*
* If we were given an absolute path, use it as is. Otherwise, if the
* provided vdev name doesn't point to a file, try prepending the expected
* disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Verify label cksum
* 3. Unpack the configuration and insert in config tree.
* 4. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
label->label_offset = vdev_label_offset(psize, l, 0);
if (pread64(fd, &label->label, sizeof (label->label),
label->label_offset) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
label->cksum_valid = label_cksum_valid(&label->label,
label->label_offset);
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device, arrange to dump the L2ARC header. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if existent.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
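/*
* Per-dataset callback: tally active per-dataset features, redaction and
* written bookmarks, livelists, and remap deadlists, then dump the objset.
*/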
static int
dump_one_objset(const char *dsname, void *arg)
{
(void) arg;
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0)
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS]++;
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
} zdb_cb_t;
/* test if two DVA offsets from same vdev are within the same metaslab */
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
const char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine prints three histograms in fixed-width columns, one row per
* power-of-two block size from 512 bytes up to SPA_MAXBLOCKSIZE, showing
* the count, length, and cumulative length of the psize, lsize, and asize
* blocks.
*
* All three block types are listed on a single line for each size.
*
* By default the table is printed in human-readable nicenum format
* (e.g. 123K), but if the '-P' parameter is specified then the full raw
* (parseable) number is printed instead.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer used with zdb_nicenum() to convert a number into a
* string, so that either raw or human-readable numbers can be output.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Setup the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
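/*
* Account for one block pointer in the per-level/per-type statistics and
* the size histograms. Unless leak checking is disabled (-L), also
* decrement the block's DDT reference (if deduped) and claim it so that
* leaked or double-allocated space can be detected later.
*/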
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
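/*
* Completion callback for the asynchronous verification reads issued by
* zdb_blkptr_cb(): release the in-flight byte throttle and record any
* non-speculative read error.
*/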
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
uint64_t kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
uint64_t sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
_Static_assert(sizeof (buf) >= NN_NUMBUF_SZ, "buf truncated");
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4"PRIu64"MB/s) "
"estimated time remaining: "
"%"PRIu64"hr %02"PRIu64"min %02"PRIu64"sec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
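/*
 * zdb never allocates from the pool; installing ops with no alloc
 * routine (see zdb_leak_init()) ensures the allocator cannot touch
 * the ms_allocatable trees that leak detection repurposes.
 */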
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset, (void) arg;
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
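/*
 * Collect the allocated segments of the removing vdev by loading the
 * SM_ALLOC entries of each of its metaslab space maps and moving them
 * into svr_allocd_segs.
 */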
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT0(range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb = {0};
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
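/*
 * Walk the on-disk DDT and pre-populate the in-core DDT so that
 * zdb_count_block() can look up and decref each entry as the
 * traversal encounters its references. The walk visits classes in
 * order, so once it reaches the UNIQUE class (refcnt == 1 entries,
 * which need no special accounting) it can stop. The space saved by
 * dedup is tallied as asize * (refcnt - 1) per block pointer.
 */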
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
ASSERT(error == ENOENT);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab to which it belongs.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference to the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
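/*
 * Return the net amount of allocated space recorded only in the
 * spacemap log, i.e. entries that have not yet been flushed into
 * the per-metaslab space maps.
 */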
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
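/*
 * Overlay the unflushed spacemap-log entries on top of the
 * ms_allocatable trees already loaded from each metaslab's ms_sm.
 */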
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries of the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable trees
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* In load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient but it's easy to
* verify correctness.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1 << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1 << vd->vdev_ashift)) {
obsolete_bytes += 1 << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping"
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t attr;
dsl_deadlist_t ll;
/* NULL out os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
(void) zap_cursor_advance(&zc)) {
dsl_deadlist_open(&ll, mos, attr.za_first_integer);
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
}
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
static int
dump_block_stats(spa_t *spa)
{
- zdb_cb_t zcb = {{{{0}}}};
+ zdb_cb_t *zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
+ zcb = umem_zalloc(sizeof (zdb_cb_t), UMEM_NOFAIL);
+
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, a reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
- zdb_leak_init(spa, &zcb);
+ zdb_leak_init(spa, zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
- bpobj_count_block_cb, &zcb, NULL);
+ bpobj_count_block_cb, zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
- bpobj_count_block_cb, &zcb, NULL);
+ bpobj_count_block_cb, zcb, NULL);
}
- zdb_claim_removing(spa, &zcb);
+ zdb_claim_removing(spa, zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
- &zcb, NULL));
+ zcb, NULL));
}
- deleted_livelists_count_blocks(spa, &zcb);
+ deleted_livelists_count_blocks(spa, zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
- zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
- zcb.zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
- zcb.zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
- zcb.zcb_totalasize +=
+ zcb->zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
+ zcb->zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
+ zcb->zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
+ zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
- zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
- err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
+ zcb->zcb_start = zcb->zcb_lastprint = gethrtime();
+ err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
- zcb.zcb_haderrors |= err;
+ zcb->zcb_haderrors |= err;
- if (zcb.zcb_haderrors) {
+ if (zcb->zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
- if (zcb.zcb_errors[e] != 0) {
+ if (zcb->zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
- e, (u_longlong_t)zcb.zcb_errors[e]);
+ e, (u_longlong_t)zcb->zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
- leaks |= zdb_leak_fini(spa, &zcb);
+ leaks |= zdb_leak_fini(spa, zcb);
- tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
+ tzb = &zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
- total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
- zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
+ total_found = tzb->zb_asize - zcb->zcb_dedup_asize +
+ zcb->zcb_removing_size + zcb->zcb_checkpoint_size;
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
- if (tzb->zb_count == 0)
+ if (tzb->zb_count == 0) {
+ umem_free(zcb, sizeof (zdb_cb_t));
return (2);
+ }
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
- "bp deduped:", (u_longlong_t)zcb.zcb_dedup_asize,
- (u_longlong_t)zcb.zcb_dedup_blocks,
- (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
+ "bp deduped:", (u_longlong_t)zcb->zcb_dedup_asize,
+ (u_longlong_t)zcb->zcb_dedup_blocks,
+ (double)zcb->zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
- if (zcb.zcb_embedded_blocks[i] == 0)
+ if (zcb->zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
- i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
+ i, (u_longlong_t)zcb->zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
- dump_histogram(zcb.zcb_embedded_histogram[i],
- sizeof (zcb.zcb_embedded_histogram[i]) /
- sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
+ dump_histogram(zcb->zcb_embedded_histogram[i],
+ sizeof (zcb->zcb_embedded_histogram[i]) /
+ sizeof (zcb->zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_num_entries(vim),
mem, vdev_indirect_mapping_size(vim));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
const char *typename;
/* make sure nicenum has enough space */
_Static_assert(sizeof (csize) >= NN_NUMBUF_SZ,
"csize truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ,
"lsize truncated");
_Static_assert(sizeof (psize) >= NN_NUMBUF_SZ,
"psize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ,
"asize truncated");
_Static_assert(sizeof (avg) >= NN_NUMBUF_SZ,
"avg truncated");
_Static_assert(sizeof (gang) >= NN_NUMBUF_SZ,
"gang truncated");
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
- if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
+ if (zcb->zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
- zb = &zcb.zcb_type[level][t];
+ zb = &zcb->zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
- zcb.zcb_type[ZB_TOTAL][t].zb_asize)
+ zcb->zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
- dump_size_histograms(&zcb);
+ dump_size_histograms(zcb);
}
}
(void) printf("\n");
- if (leaks)
+ if (leaks) {
+ umem_free(zcb, sizeof (zdb_cb_t));
return (2);
+ }
- if (zcb.zcb_haderrors)
+ if (zcb->zcb_haderrors) {
+ umem_free(zcb, sizeof (zdb_cb_t));
return (3);
+ }
+ umem_free(zcb, sizeof (zdb_cb_t));
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
(void) zilog, (void) dnp;
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
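/*
 * Each of the counts summed below corresponds to one class of MOS
 * object tracked by the obsolete_counts feature refcount; their sum
 * must match the refcount read above.
 */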
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter; otherwise it attempts to infer the config
* from the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
*/
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1)
return (NULL);
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool; therefore,
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in the checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in the current state's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can do verification on it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
(void) arg;
for (uint64_t i = start; i < start + size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t class = 0; class < DDT_CLASSES; class++) {
for (uint64_t type = 0; type < DDT_TYPES; type++) {
for (uint64_t cksum = 0;
cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
ddt_t *ddt = spa->spa_ddt[cksum];
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
}
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
dmu_object_info(mos, object, &doi);
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
return (rv);
}
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed log - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos = {0};
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa, dump_opt['M'] > 1);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
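/*
 * Illustrative examples (values are hypothetical): a sizes string of
 * "4000/2000" parses to lsize 0x4000 and psize 0x2000, while "2000" alone
 * parses to lsize == psize == 0x2000; both values are interpreted as hex.
 */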
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
static boolean_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
(void) buf;
boolean_t exceeded = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
(getenv("ZDB_NO_ZLE") ? ZIO_COMPRESS_MASK(ZLE) : 0);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
 * info gets old after a while. The user can specify the 'v' flag
 * to see the progress.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[*cfuncp].\
ci_name);
}
/*
* We randomize lbuf2, and decompress to both
 * lbuf and lbuf2. This way, we will know whether the
 * decompression filled exactly lsize bytes.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(*cfuncp, pabd,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
memcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > maxlsize) {
exceeded = B_TRUE;
}
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (exceeded);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
*/
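/*
 * A hypothetical descriptor in the format above (pool name, vdev, offset
 * and size are made up): tank:0:1000000:20000:r would request a raw dump
 * of the 0x20000-byte block at offset 0x1000000 on top-level vdev 0 of
 * pool "tank".
 */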
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *flagstr, *sizes, *tmp = NULL;
const char *vdev, *errmsg = NULL;
int i, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ?: "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ?: "");
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
errmsg = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
errmsg = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
errmsg = "offset must be a multiple of sector size";
if (errmsg) {
(void) printf("Invalid block specifier: %s - %s\n",
thing, errmsg);
goto done;
}
tmp = NULL;
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
for (i = 0; i < strlen(flagstr); i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
free(dup);
return;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
boolean_t failed = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (failed) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect invalid block pointer. If invalid, try
* decompressing.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b, B_FALSE, BLK_VERIFY_ONLY) ==
B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
boolean_t failed = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (failed || zfs_blkptr_verify(spa, b, B_FALSE,
BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
czio->io_bp = bp;
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_root(spa, NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf("%12s\tcksum=%llx:%llx:%llx:%llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp = {{{{0}}}};
unsigned long long *words = (void *)&bp;
char *buf;
int err;
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
/* check for valid hex or decimal numeric string */
static boolean_t
zdb_numeric(char *str)
{
int i = 0;
if (strlen(str) == 0)
return (B_FALSE);
if (strncmp(str, "0x", 2) == 0 || strncmp(str, "0X", 2) == 0)
i = 2;
for (; i < strlen(str); i++) {
if (!isxdigit(str[i]))
return (B_FALSE);
}
return (B_TRUE);
}
int
main(int argc, char **argv)
{
int c;
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
dprintf_setup(&argc, argv);
/*
 * If the SPA_CONFIG_PATH environment variable is set, it overrides the
 * default spa_config_path setting. If the -U flag is specified, it
 * overrides the environment variable setting once again.
*/
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
struct option long_options[] = {
{"ignore-assertions", no_argument, NULL, 'A'},
{"block-stats", no_argument, NULL, 'b'},
{"checksum", no_argument, NULL, 'c'},
{"config", no_argument, NULL, 'C'},
{"datasets", no_argument, NULL, 'd'},
{"dedup-stats", no_argument, NULL, 'D'},
{"exported", no_argument, NULL, 'e'},
{"embedded-block-pointer", no_argument, NULL, 'E'},
{"automatic-rewind", no_argument, NULL, 'F'},
{"dump-debug-msg", no_argument, NULL, 'G'},
{"history", no_argument, NULL, 'h'},
{"intent-logs", no_argument, NULL, 'i'},
{"inflight", required_argument, NULL, 'I'},
{"checkpointed-state", no_argument, NULL, 'k'},
{"label", no_argument, NULL, 'l'},
{"disable-leak-tracking", no_argument, NULL, 'L'},
{"metaslabs", no_argument, NULL, 'm'},
{"metaslab-groups", no_argument, NULL, 'M'},
{"numeric", no_argument, NULL, 'N'},
{"option", required_argument, NULL, 'o'},
{"object-lookups", no_argument, NULL, 'O'},
{"path", required_argument, NULL, 'p'},
{"parseable", no_argument, NULL, 'P'},
{"skip-label", no_argument, NULL, 'q'},
{"copy-object", no_argument, NULL, 'r'},
{"read-block", no_argument, NULL, 'R'},
{"io-stats", no_argument, NULL, 's'},
{"simulate-dedup", no_argument, NULL, 'S'},
{"txg", required_argument, NULL, 't'},
{"uberblock", no_argument, NULL, 'u'},
{"cachefile", required_argument, NULL, 'U'},
{"verbose", no_argument, NULL, 'v'},
{"verbatim", no_argument, NULL, 'V'},
{"dump-blocks", required_argument, NULL, 'x'},
{"extreme-rewind", no_argument, NULL, 'X'},
{"all-reconstruction", no_argument, NULL, 'Y'},
{"livelist", no_argument, NULL, 'y'},
{"zstd-headers", no_argument, NULL, 'Z'},
{0, 0, 0, 0}
};
while ((c = getopt_long(argc, argv,
"AbcCdDeEFGhiI:klLmMNo:Op:PqrRsSt:uU:vVx:XYyZ",
long_options, NULL)) != -1) {
switch (c) {
case 'b':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'N':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
memcpy(tmp, searchdirs, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = zfs_arc_meta_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
#endif
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
/*
 * ZDB should have the ability to read spacemaps.
*/
spa_mode_readable_spacemaps = B_TRUE;
kernel_init(SPA_MODE_READ);
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("AeEFklLNOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_set_assert_ok((dump_opt['A'] == 1) || (dump_opt['A'] > 2));
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1], NULL));
}
if (dump_opt['r']) {
target_is_spa = B_FALSE;
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
/* -N implies -d */
if (dump_opt['N'] && dump_opt['d'] == 0)
dump_opt['d'] = dump_opt['N'];
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
/*
* See if an objset ID was supplied (-d <pool>/<objset ID>).
 * To disambiguate tank/100, treat 100 as an objset ID if -N was
 * given; otherwise 100 is an objset ID only if looking up tank/100
 * as a named dataset fails.
*/
objset_str = strchr(target, '/');
if (objset_str && strlen(objset_str) > 1 &&
zdb_numeric(objset_str + 1)) {
char *endptr;
errno = 0;
objset_str++;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
if (dump_opt['N'])
dataset_lookup = B_TRUE;
}
/* a normal dataset name, not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
exit(1);
}
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
error = zpool_find_config(NULL, target_pool, &cfg, &args,
&libzpool_config_ops);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* import_checkpointed_state makes the assumption that the
* target pool that we pass it is already part of the spa
 * namespace. Because of that, we need to make sure we always call
 * it after the -e option has been processed, which
* imports the pool to the namespace if it's not in the
* cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
} else if (target_is_spa || dump_opt['R'] || objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
return (error);
} else {
target_pool = strdup(target);
if (strpbrk(target, "/@") != NULL)
*strpbrk(target_pool, "/@") = '\0';
zdb_set_skip_mmp(target);
/*
* If -N was supplied, the user has indicated that
* zdb -d <pool>/<objsetID> is in effect. Otherwise
* we first assume that the dataset string is the
* dataset name. If dmu_objset_hold fails with the
* dataset string, and we have an objset_id, retry the
* lookup with the objsetID.
*/
boolean_t retry = B_TRUE;
retry_lookup:
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target_pool, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0) {
if (objset_id > 0 && retry) {
int err = dmu_objset_hold(target, FTAG,
&os);
if (err) {
dataset_lookup = B_TRUE;
retry = B_FALSE;
goto retry_lookup;
} else {
dmu_objset_rele(os, FTAG);
}
}
error = open_objset(target, FTAG, &os);
}
if (error == 0)
spa = dmu_objset_spa(os);
free(target_pool);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
const char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ?: "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
if (os != NULL) {
close_objset(os, FTAG);
} else {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/fmd_api.c b/sys/contrib/openzfs/cmd/zed/agents/fmd_api.c
index 9e46e831d517..56c134b731b8 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/fmd_api.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/fmd_api.c
@@ -1,780 +1,780 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2016, Intel Corporation.
*/
/*
* This file implements the minimal FMD module API required to support the
* fault logic modules in ZED. This support includes module registration,
* memory allocation, module property accessors, basic case management,
* one-shot timers and SERD engines.
*
* In the ZED runtime, the modules are called from a single thread so no
* locking is required in this emulated FMD environment.
*/
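/*
 * Illustrative call flow only (entry point names vary per module): a ZED
 * fault-logic module built on this emulated API typically calls
 * fmd_hdl_register() from its init function; each subscribed event is
 * then delivered through fmd_module_recv(), whose fmdo_recv callback may
 * open, solve and close cases and feed SERD engines via fmd_serd_record();
 * finally the module calls fmd_hdl_unregister() from its fini function.
 */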
#include <sys/types.h>
#include <sys/fm/protocol.h>
#include <uuid/uuid.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include "fmd_api.h"
#include "fmd_serd.h"
#include "zfs_agents.h"
#include "../zed_log.h"
typedef struct fmd_modstat {
fmd_stat_t ms_accepted; /* total events accepted by module */
fmd_stat_t ms_caseopen; /* cases currently open */
fmd_stat_t ms_casesolved; /* total cases solved by module */
fmd_stat_t ms_caseclosed; /* total cases closed by module */
} fmd_modstat_t;
typedef struct fmd_module {
const char *mod_name; /* basename of module (ro) */
const fmd_hdl_info_t *mod_info; /* module info registered with handle */
void *mod_spec; /* fmd_hdl_get/setspecific data value */
fmd_stat_t *mod_ustat; /* module specific custom stats */
uint_t mod_ustat_cnt; /* count of ustat stats */
fmd_modstat_t mod_stats; /* fmd built-in per-module statistics */
fmd_serd_hash_t mod_serds; /* hash of serd engs owned by module */
char *mod_vers; /* a copy of module version string */
} fmd_module_t;
/*
 * ZED has two hardwired FMD module instances.
*/
fmd_module_t zfs_retire_module;
fmd_module_t zfs_diagnosis_module;
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
/*
* Register a module with fmd and finish module initialization.
* Returns an integer indicating whether it succeeded (zero) or
* failed (non-zero).
*/
int
fmd_hdl_register(fmd_hdl_t *hdl, int version, const fmd_hdl_info_t *mip)
{
(void) version;
fmd_module_t *mp = (fmd_module_t *)hdl;
mp->mod_info = mip;
mp->mod_name = mip->fmdi_desc + 4; /* drop 'ZFS ' prefix */
mp->mod_spec = NULL;
/* bare minimum module stats */
(void) strcpy(mp->mod_stats.ms_accepted.fmds_name, "fmd.accepted");
(void) strcpy(mp->mod_stats.ms_caseopen.fmds_name, "fmd.caseopen");
(void) strcpy(mp->mod_stats.ms_casesolved.fmds_name, "fmd.casesolved");
(void) strcpy(mp->mod_stats.ms_caseclosed.fmds_name, "fmd.caseclosed");
fmd_serd_hash_create(&mp->mod_serds);
fmd_hdl_debug(hdl, "register module");
return (0);
}
void
fmd_hdl_unregister(fmd_hdl_t *hdl)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
fmd_modstat_t *msp = &mp->mod_stats;
const fmd_hdl_ops_t *ops = mp->mod_info->fmdi_ops;
/* dump generic module stats */
fmd_hdl_debug(hdl, "%s: %llu", msp->ms_accepted.fmds_name,
msp->ms_accepted.fmds_value.ui64);
if (ops->fmdo_close != NULL) {
fmd_hdl_debug(hdl, "%s: %llu", msp->ms_caseopen.fmds_name,
msp->ms_caseopen.fmds_value.ui64);
fmd_hdl_debug(hdl, "%s: %llu", msp->ms_casesolved.fmds_name,
msp->ms_casesolved.fmds_value.ui64);
fmd_hdl_debug(hdl, "%s: %llu", msp->ms_caseclosed.fmds_name,
msp->ms_caseclosed.fmds_value.ui64);
}
/* dump module specific stats */
if (mp->mod_ustat != NULL) {
int i;
for (i = 0; i < mp->mod_ustat_cnt; i++) {
fmd_hdl_debug(hdl, "%s: %llu",
mp->mod_ustat[i].fmds_name,
mp->mod_ustat[i].fmds_value.ui64);
}
}
fmd_serd_hash_destroy(&mp->mod_serds);
fmd_hdl_debug(hdl, "unregister module");
}
/*
* fmd_hdl_setspecific() is used to associate a data pointer with
* the specified handle for the duration of the module's lifetime.
* This pointer can be retrieved using fmd_hdl_getspecific().
*/
void
fmd_hdl_setspecific(fmd_hdl_t *hdl, void *spec)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
mp->mod_spec = spec;
}
/*
* Return the module-specific data pointer previously associated
* with the handle using fmd_hdl_setspecific().
*/
void *
fmd_hdl_getspecific(fmd_hdl_t *hdl)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
return (mp->mod_spec);
}
void *
fmd_hdl_alloc(fmd_hdl_t *hdl, size_t size, int flags)
{
(void) hdl;
return (umem_alloc(size, flags));
}
void *
fmd_hdl_zalloc(fmd_hdl_t *hdl, size_t size, int flags)
{
(void) hdl;
return (umem_zalloc(size, flags));
}
void
fmd_hdl_free(fmd_hdl_t *hdl, void *data, size_t size)
{
(void) hdl;
umem_free(data, size);
}
/*
* Record a module debug message using the specified format.
*/
void
fmd_hdl_debug(fmd_hdl_t *hdl, const char *format, ...)
{
char message[256];
va_list vargs;
fmd_module_t *mp = (fmd_module_t *)hdl;
va_start(vargs, format);
(void) vsnprintf(message, sizeof (message), format, vargs);
va_end(vargs);
/* prefix message with module name */
zed_log_msg(LOG_INFO, "%s: %s", mp->mod_name, message);
}
/* Property Retrieval */
int32_t
fmd_prop_get_int32(fmd_hdl_t *hdl, const char *name)
{
(void) hdl;
/*
 * These could be looked up in mp->modinfo->fmdi_props.
 * For now we just hard-code the values for phase 2. In the
 * future, there could be a ZED-based override.
*/
if (strcmp(name, "spare_on_remove") == 0)
return (1);
if (strcmp(name, "io_N") == 0 || strcmp(name, "checksum_N") == 0)
return (10); /* N = 10 events */
return (0);
}
int64_t
fmd_prop_get_int64(fmd_hdl_t *hdl, const char *name)
{
(void) hdl;
/*
 * These could be looked up in mp->modinfo->fmdi_props.
 * For now we just hard-code the values for phase 2. In the
 * future, there could be a ZED-based override.
*/
if (strcmp(name, "remove_timeout") == 0)
return (15ULL * 1000ULL * 1000ULL * 1000ULL); /* 15 sec */
if (strcmp(name, "io_T") == 0 || strcmp(name, "checksum_T") == 0)
return (1000ULL * 1000ULL * 1000ULL * 600ULL); /* 10 min */
return (0);
}
/* FMD Statistics */
fmd_stat_t *
fmd_stat_create(fmd_hdl_t *hdl, uint_t flags, uint_t nstats, fmd_stat_t *statv)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
if (flags == FMD_STAT_NOALLOC) {
mp->mod_ustat = statv;
mp->mod_ustat_cnt = nstats;
}
return (statv);
}
/* Case Management */
fmd_case_t *
fmd_case_open(fmd_hdl_t *hdl, void *data)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
uuid_t uuid;
fmd_case_t *cp;
cp = fmd_hdl_zalloc(hdl, sizeof (fmd_case_t), FMD_SLEEP);
cp->ci_mod = hdl;
cp->ci_state = FMD_CASE_UNSOLVED;
cp->ci_flags = FMD_CF_DIRTY;
cp->ci_data = data;
cp->ci_bufptr = NULL;
cp->ci_bufsiz = 0;
uuid_generate(uuid);
uuid_unparse(uuid, cp->ci_uuid);
fmd_hdl_debug(hdl, "case opened (%s)", cp->ci_uuid);
mp->mod_stats.ms_caseopen.fmds_value.ui64++;
return (cp);
}
void
fmd_case_solve(fmd_hdl_t *hdl, fmd_case_t *cp)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
/*
* For ZED, the event was already sent from fmd_case_add_suspect()
*/
if (cp->ci_state >= FMD_CASE_SOLVED)
fmd_hdl_debug(hdl, "case is already solved or closed");
cp->ci_state = FMD_CASE_SOLVED;
fmd_hdl_debug(hdl, "case solved (%s)", cp->ci_uuid);
mp->mod_stats.ms_casesolved.fmds_value.ui64++;
}
void
fmd_case_close(fmd_hdl_t *hdl, fmd_case_t *cp)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
const fmd_hdl_ops_t *ops = mp->mod_info->fmdi_ops;
fmd_hdl_debug(hdl, "case closed (%s)", cp->ci_uuid);
if (ops->fmdo_close != NULL)
ops->fmdo_close(hdl, cp);
mp->mod_stats.ms_caseopen.fmds_value.ui64--;
mp->mod_stats.ms_caseclosed.fmds_value.ui64++;
if (cp->ci_bufptr != NULL && cp->ci_bufsiz > 0)
fmd_hdl_free(hdl, cp->ci_bufptr, cp->ci_bufsiz);
fmd_hdl_free(hdl, cp, sizeof (fmd_case_t));
}
void
fmd_case_uuresolved(fmd_hdl_t *hdl, const char *uuid)
{
fmd_hdl_debug(hdl, "case resolved by uuid (%s)", uuid);
}
boolean_t
fmd_case_solved(fmd_hdl_t *hdl, fmd_case_t *cp)
{
(void) hdl;
return (cp->ci_state >= FMD_CASE_SOLVED);
}
void
fmd_case_add_ereport(fmd_hdl_t *hdl, fmd_case_t *cp, fmd_event_t *ep)
{
(void) hdl, (void) cp, (void) ep;
}
static void
zed_log_fault(nvlist_t *nvl, const char *uuid, const char *code)
{
nvlist_t *rsrc;
char *strval;
uint64_t guid;
uint8_t byte;
zed_log_msg(LOG_INFO, "\nzed_fault_event:");
if (uuid != NULL)
zed_log_msg(LOG_INFO, "\t%s: %s", FM_SUSPECT_UUID, uuid);
if (nvlist_lookup_string(nvl, FM_CLASS, &strval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %s", FM_CLASS, strval);
if (code != NULL)
zed_log_msg(LOG_INFO, "\t%s: %s", FM_SUSPECT_DIAG_CODE, code);
if (nvlist_lookup_uint8(nvl, FM_FAULT_CERTAINTY, &byte) == 0)
- zed_log_msg(LOG_INFO, "\t%s: %llu", FM_FAULT_CERTAINTY, byte);
+ zed_log_msg(LOG_INFO, "\t%s: %hhu", FM_FAULT_CERTAINTY, byte);
if (nvlist_lookup_nvlist(nvl, FM_FAULT_RESOURCE, &rsrc) == 0) {
if (nvlist_lookup_string(rsrc, FM_FMRI_SCHEME, &strval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %s", FM_FMRI_SCHEME,
strval);
if (nvlist_lookup_uint64(rsrc, FM_FMRI_ZFS_POOL, &guid) == 0)
zed_log_msg(LOG_INFO, "\t%s: %llu", FM_FMRI_ZFS_POOL,
guid);
if (nvlist_lookup_uint64(rsrc, FM_FMRI_ZFS_VDEV, &guid) == 0)
zed_log_msg(LOG_INFO, "\t%s: %llu \n", FM_FMRI_ZFS_VDEV,
guid);
}
}
static const char *
fmd_fault_mkcode(nvlist_t *fault)
{
char *class;
const char *code = "-";
/*
* Note: message codes come from: openzfs/usr/src/cmd/fm/dicts/ZFS.po
*/
if (nvlist_lookup_string(fault, FM_CLASS, &class) == 0) {
if (strcmp(class, "fault.fs.zfs.vdev.io") == 0)
code = "ZFS-8000-FD";
else if (strcmp(class, "fault.fs.zfs.vdev.checksum") == 0)
code = "ZFS-8000-GH";
else if (strcmp(class, "fault.fs.zfs.io_failure_wait") == 0)
code = "ZFS-8000-HC";
else if (strcmp(class, "fault.fs.zfs.io_failure_continue") == 0)
code = "ZFS-8000-JQ";
else if (strcmp(class, "fault.fs.zfs.log_replay") == 0)
code = "ZFS-8000-K4";
else if (strcmp(class, "fault.fs.zfs.pool") == 0)
code = "ZFS-8000-CS";
else if (strcmp(class, "fault.fs.zfs.device") == 0)
code = "ZFS-8000-D3";
}
return (code);
}
void
fmd_case_add_suspect(fmd_hdl_t *hdl, fmd_case_t *cp, nvlist_t *fault)
{
nvlist_t *nvl;
const char *code = fmd_fault_mkcode(fault);
int64_t tod[2];
int err = 0;
/*
* payload derived from fmd_protocol_list()
*/
(void) gettimeofday(&cp->ci_tv, NULL);
tod[0] = cp->ci_tv.tv_sec;
tod[1] = cp->ci_tv.tv_usec;
nvl = fmd_nvl_alloc(hdl, FMD_SLEEP);
err |= nvlist_add_uint8(nvl, FM_VERSION, FM_SUSPECT_VERSION);
err |= nvlist_add_string(nvl, FM_CLASS, FM_LIST_SUSPECT_CLASS);
err |= nvlist_add_string(nvl, FM_SUSPECT_UUID, cp->ci_uuid);
err |= nvlist_add_string(nvl, FM_SUSPECT_DIAG_CODE, code);
err |= nvlist_add_int64_array(nvl, FM_SUSPECT_DIAG_TIME, tod, 2);
err |= nvlist_add_uint32(nvl, FM_SUSPECT_FAULT_SZ, 1);
err |= nvlist_add_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
(const nvlist_t **)&fault, 1);
if (err)
zed_log_die("failed to populate nvlist");
zed_log_fault(fault, cp->ci_uuid, code);
zfs_agent_post_event(FM_LIST_SUSPECT_CLASS, NULL, nvl);
nvlist_free(nvl);
nvlist_free(fault);
}
void
fmd_case_setspecific(fmd_hdl_t *hdl, fmd_case_t *cp, void *data)
{
(void) hdl;
cp->ci_data = data;
}
void *
fmd_case_getspecific(fmd_hdl_t *hdl, fmd_case_t *cp)
{
(void) hdl;
return (cp->ci_data);
}
void
fmd_buf_create(fmd_hdl_t *hdl, fmd_case_t *cp, const char *name, size_t size)
{
assert(strcmp(name, "data") == 0), (void) name;
assert(cp->ci_bufptr == NULL);
assert(size < (1024 * 1024));
cp->ci_bufptr = fmd_hdl_alloc(hdl, size, FMD_SLEEP);
cp->ci_bufsiz = size;
}
void
fmd_buf_read(fmd_hdl_t *hdl, fmd_case_t *cp,
const char *name, void *buf, size_t size)
{
(void) hdl;
assert(strcmp(name, "data") == 0), (void) name;
assert(cp->ci_bufptr != NULL);
assert(size <= cp->ci_bufsiz);
memcpy(buf, cp->ci_bufptr, size);
}
void
fmd_buf_write(fmd_hdl_t *hdl, fmd_case_t *cp,
const char *name, const void *buf, size_t size)
{
(void) hdl;
assert(strcmp(name, "data") == 0), (void) name;
assert(cp->ci_bufptr != NULL);
assert(cp->ci_bufsiz >= size);
memcpy(cp->ci_bufptr, buf, size);
}
/* SERD Engines */
void
fmd_serd_create(fmd_hdl_t *hdl, const char *name, uint_t n, hrtime_t t)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
if (fmd_serd_eng_lookup(&mp->mod_serds, name) != NULL) {
zed_log_msg(LOG_ERR, "failed to create SERD engine '%s': "
" name already exists", name);
return;
}
(void) fmd_serd_eng_insert(&mp->mod_serds, name, n, t);
}
void
fmd_serd_destroy(fmd_hdl_t *hdl, const char *name)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
fmd_serd_eng_delete(&mp->mod_serds, name);
fmd_hdl_debug(hdl, "serd_destroy %s", name);
}
int
fmd_serd_exists(fmd_hdl_t *hdl, const char *name)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
return (fmd_serd_eng_lookup(&mp->mod_serds, name) != NULL);
}
void
fmd_serd_reset(fmd_hdl_t *hdl, const char *name)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
fmd_serd_eng_t *sgp;
if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
zed_log_msg(LOG_ERR, "serd engine '%s' does not exist", name);
return;
}
fmd_serd_eng_reset(sgp);
fmd_hdl_debug(hdl, "serd_reset %s", name);
}
int
fmd_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
fmd_serd_eng_t *sgp;
int err;
if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
zed_log_msg(LOG_ERR, "failed to add record to SERD engine '%s'",
name);
return (0);
}
err = fmd_serd_eng_record(sgp, ep->ev_hrt);
return (err);
}
/* FMD Timers */
static void
_timer_notify(union sigval sv)
{
fmd_timer_t *ftp = sv.sival_ptr;
fmd_hdl_t *hdl = ftp->ft_hdl;
fmd_module_t *mp = (fmd_module_t *)hdl;
const fmd_hdl_ops_t *ops = mp->mod_info->fmdi_ops;
struct itimerspec its;
fmd_hdl_debug(hdl, "timer fired (%p)", ftp->ft_tid);
/* disarm the timer */
memset(&its, 0, sizeof (struct itimerspec));
timer_settime(ftp->ft_tid, 0, &its, NULL);
/* Note that the fmdo_timeout callback can remove this timer. */
if (ops->fmdo_timeout != NULL)
ops->fmdo_timeout(hdl, ftp, ftp->ft_arg);
}
/*
* Install a new timer which will fire at least delta nanoseconds after the
* current time. After the timeout has expired, the module's fmdo_timeout
* entry point is called.
*/
fmd_timer_t *
fmd_timer_install(fmd_hdl_t *hdl, void *arg, fmd_event_t *ep, hrtime_t delta)
{
(void) ep;
struct sigevent sev;
struct itimerspec its;
fmd_timer_t *ftp;
ftp = fmd_hdl_alloc(hdl, sizeof (fmd_timer_t), FMD_SLEEP);
ftp->ft_arg = arg;
ftp->ft_hdl = hdl;
its.it_value.tv_sec = delta / 1000000000;
its.it_value.tv_nsec = delta % 1000000000;
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
sev.sigev_notify = SIGEV_THREAD;
sev.sigev_notify_function = _timer_notify;
sev.sigev_notify_attributes = NULL;
sev.sigev_value.sival_ptr = ftp;
timer_create(CLOCK_REALTIME, &sev, &ftp->ft_tid);
timer_settime(ftp->ft_tid, 0, &its, NULL);
fmd_hdl_debug(hdl, "installing timer for %d secs (%p)",
(int)its.it_value.tv_sec, ftp->ft_tid);
return (ftp);
}
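/*
 * Hypothetical usage (argument values are made up): arm a one-shot timer
 * that fires in roughly 15 seconds and invokes the module's fmdo_timeout
 * entry point:
 *
 * ftp = fmd_timer_install(hdl, NULL, NULL,
 *     15ULL * 1000ULL * 1000ULL * 1000ULL);
 */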
void
fmd_timer_remove(fmd_hdl_t *hdl, fmd_timer_t *ftp)
{
fmd_hdl_debug(hdl, "removing timer (%p)", ftp->ft_tid);
timer_delete(ftp->ft_tid);
fmd_hdl_free(hdl, ftp, sizeof (fmd_timer_t));
}
/* Name-Value Pair Lists */
nvlist_t *
fmd_nvl_create_fault(fmd_hdl_t *hdl, const char *class, uint8_t certainty,
nvlist_t *asru, nvlist_t *fru, nvlist_t *resource)
{
(void) hdl;
nvlist_t *nvl;
int err = 0;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
zed_log_die("failed to xalloc fault nvlist");
err |= nvlist_add_uint8(nvl, FM_VERSION, FM_FAULT_VERSION);
err |= nvlist_add_string(nvl, FM_CLASS, class);
err |= nvlist_add_uint8(nvl, FM_FAULT_CERTAINTY, certainty);
if (asru != NULL)
err |= nvlist_add_nvlist(nvl, FM_FAULT_ASRU, asru);
if (fru != NULL)
err |= nvlist_add_nvlist(nvl, FM_FAULT_FRU, fru);
if (resource != NULL)
err |= nvlist_add_nvlist(nvl, FM_FAULT_RESOURCE, resource);
if (err)
zed_log_die("failed to populate nvlist: %s\n", strerror(err));
return (nvl);
}
/*
* sourced from fmd_string.c
*/
static int
fmd_strmatch(const char *s, const char *p)
{
char c;
if (p == NULL)
return (0);
if (s == NULL)
s = ""; /* treat NULL string as the empty string */
do {
if ((c = *p++) == '\0')
return (*s == '\0');
if (c == '*') {
while (*p == '*')
p++; /* consecutive *'s can be collapsed */
if (*p == '\0')
return (1);
while (*s != '\0') {
if (fmd_strmatch(s++, p) != 0)
return (1);
}
return (0);
}
} while (c == *s++);
return (0);
}
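/*
 * Illustrative example (the class string appears in fmd_fault_mkcode()
 * above): fmd_strmatch("fault.fs.zfs.vdev.io", "fault.fs.zfs.*")
 * returns 1, so fmd_nvl_class_match(hdl, nvl, "fault.fs.zfs.*") accepts
 * any ZFS fault class.
 */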
int
fmd_nvl_class_match(fmd_hdl_t *hdl, nvlist_t *nvl, const char *pattern)
{
(void) hdl;
char *class;
return (nvl != NULL &&
nvlist_lookup_string(nvl, FM_CLASS, &class) == 0 &&
fmd_strmatch(class, pattern));
}
nvlist_t *
fmd_nvl_alloc(fmd_hdl_t *hdl, int flags)
{
(void) hdl, (void) flags;
nvlist_t *nvl = NULL;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
return (nvl);
}
/*
* ZED Agent specific APIs
*/
fmd_hdl_t *
fmd_module_hdl(const char *name)
{
if (strcmp(name, "zfs-retire") == 0)
return ((fmd_hdl_t *)&zfs_retire_module);
if (strcmp(name, "zfs-diagnosis") == 0)
return ((fmd_hdl_t *)&zfs_diagnosis_module);
return (NULL);
}
boolean_t
fmd_module_initialized(fmd_hdl_t *hdl)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
return (mp->mod_info != NULL);
}
/*
 * fmd_module_recv is called for each event received by the fault
 * manager whose class matches one of the module's subscriptions.
*/
void
fmd_module_recv(fmd_hdl_t *hdl, nvlist_t *nvl, const char *class)
{
fmd_module_t *mp = (fmd_module_t *)hdl;
const fmd_hdl_ops_t *ops = mp->mod_info->fmdi_ops;
fmd_event_t faux_event = {0};
int64_t *tv;
uint_t n;
/*
 * We will need to normalize this if we persistently store the case data.
*/
if (nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0)
faux_event.ev_hrt = tv[0] * NANOSEC + tv[1];
else
faux_event.ev_hrt = 0;
ops->fmdo_recv(hdl, &faux_event, nvl, class);
mp->mod_stats.ms_accepted.fmds_value.ui64++;
/* TBD - should we initiate fm_module_gc() periodically? */
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
index d75854f2875a..53d9ababded9 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
@@ -1,1136 +1,1275 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016, 2017, Intel Corporation.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
* ZFS syseventd module.
*
* file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
*
* The purpose of this module is to identify when devices are added to the
* system, and appropriately online or replace the affected vdevs.
*
* When a device is added to the system:
*
* 1. Search for any vdevs whose devid matches that of the newly added
* device.
*
* 2. If no vdevs are found, then search for any vdevs whose udev path
* matches that of the new device.
*
* 3. If no vdevs match by either method, then ignore the event.
*
* 4. Attempt to online the device with a flag to indicate that it should
* be unspared when resilvering completes. If this succeeds, then the
* same device was inserted and we should continue normally.
*
* 5. If the pool does not have the 'autoreplace' property set, attempt to
* online the device again without the unspare flag, which will
* generate a FMA fault.
*
* 6. If the pool has the 'autoreplace' property set, and the matching vdev
* is a whole disk, then label the new disk and attempt a 'zpool
* replace'.
*
* The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
* event indicates that a device failed to open during pool load, but the
* autoreplace property was set. In this case, we deferred the associated
* FMA fault until our module had a chance to process the autoreplace logic.
* If the device could not be replaced, then the second online attempt will
* trigger the FMA fault that we skipped earlier.
*
* On Linux udev provides a disk insert for both the disk and the partition.
*/
#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"
#define DEV_BYID_PATH "/dev/disk/by-id/"
#define DEV_BYPATH_PATH "/dev/disk/by-path/"
#define DEV_BYVDEV_PATH "/dev/disk/by-vdev/"
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
libzfs_handle_t *g_zfshdl;
list_t g_pool_list; /* list of unavailable pools at initialization */
list_t g_device_list; /* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid; /* zfs_enum_pools() thread */
typedef struct unavailpool {
zpool_handle_t *uap_zhp;
list_node_t uap_node;
} unavailpool_t;
typedef struct pendingdev {
char pd_physpath[128];
list_node_t pd_node;
} pendingdev_t;
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
nvlist_t *nvroot;
vdev_stat_t *vs;
unsigned int c;
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
return (vs->vs_state);
}
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
unavailpool_t *uap;
uap = malloc(sizeof (unavailpool_t));
uap->uap_zhp = zhp;
list_insert_tail((list_t *)data, uap);
} else {
zpool_close(zhp);
}
return (0);
}
/*
* Two-stage replace on Linux:
* since we get disk notifications,
* we can wait for the partitioned disk slice to show up.
*
* First stage tags the disk, initiates async partitioning, and returns
* Second stage finds the tag and proceeds to ZFS labeling/replace
*
* disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
*
* 1. physical match with no fs, no partition
* tag it top, partition disk
*
* 2. physical match again, see partition and tag
*
*/
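/*
 * Illustrative sketch of the two stages, reusing the module's own globals
 * and types; the real logic lives in zfs_process_add() below.
 */
#if 0
	/* Stage 1: a blank whole disk arrives -- label it and remember it. */
	if (zpool_label_disk(g_zfshdl, zhp, leafname) == 0) {
		device = malloc(sizeof (pendingdev_t));
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);
	}

	/* Stage 2: the partition's add event arrives -- find the tag. */
	for (device = list_head(&g_device_list); device != NULL;
	    device = list_next(&g_device_list, device)) {
		if (strcmp(physpath, device->pd_physpath) == 0)
			break;	/* proceed to zpool_vdev_attach() */
	}
#endif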
/*
* The device associated with the given vdev (either by devid or physical path)
* has been added to the system. If 'isdisk' is set, then we only attempt a
* replacement if it's a whole disk. This also implies that we should label the
* disk first.
*
* First, we attempt to online the device (making sure to undo any spare
* operation when finished). If this succeeds, then we're done. If it fails,
* and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
* but that the label was not what we expected. If the 'autoreplace' property
* is enabled, then we relabel the disk (if specified), and attempt a 'zpool
* replace'. If the online is successful, but the new state is something else
* (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
* race, and we should avoid attempting to relabel the disk.
*
* We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
*/
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
char *path;
vdev_state_t newstate;
nvlist_t *nvroot, *newvd;
pendingdev_t *device;
uint64_t wholedisk = 0ULL;
uint64_t offline = 0ULL, faulted = 0ULL;
uint64_t guid = 0ULL;
char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
char rawpath[PATH_MAX], fullpath[PATH_MAX];
char devpath[PATH_MAX];
int ret;
boolean_t is_sd = B_FALSE;
boolean_t is_mpath_wholedisk = B_FALSE;
uint_t c;
vdev_stat_t *vs;
if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
return;
/* Skip healthy disks */
verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (vs->vs_state == VDEV_STATE_HEALTHY) {
zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
__func__, path);
return;
}
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&enc_sysfs_path);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
/*
* Special case:
*
* We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
* entry in its config. For example, on this force-faulted disk:
*
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
* the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
*/
if (physpath == NULL && path != NULL) {
/* If path begins with "/dev/disk/by-vdev/" ... */
if (strncmp(path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) == 0) {
/* Set physpath to the char after "/dev/disk/by-vdev" */
physpath = &path[strlen(DEV_BYVDEV_PATH)];
}
}
/*
* We don't want to autoreplace offlined disks. However, we do want to
* replace force-faulted disks (`zpool offline -f`). Force-faulted
* disks have both offline=1 and faulted=1 in the nvlist.
*/
if (offline && !faulted) {
zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
__func__, path);
return;
}
is_mpath_wholedisk = is_mpath_whole_disk(path);
zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
" %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
"(guid %llu)",
zpool_get_name(zhp), path,
physpath ? physpath : "NULL",
wholedisk ? "is" : "not",
is_mpath_wholedisk ? "is" : "not",
labeled ? "is" : "not",
enc_sysfs_path,
(long long unsigned int)guid);
/*
* The VDEV guid is preferred for identification (gets passed in path)
*/
if (guid != 0) {
(void) snprintf(fullpath, sizeof (fullpath), "%llu",
(long long unsigned int)guid);
} else {
/*
* otherwise use path sans partition suffix for whole disks
*/
(void) strlcpy(fullpath, path, sizeof (fullpath));
if (wholedisk) {
char *spath = zfs_strip_partition(fullpath);
if (!spath) {
zed_log_msg(LOG_INFO, "%s: Can't alloc",
__func__);
return;
}
(void) strlcpy(fullpath, spath, sizeof (fullpath));
free(spath);
}
}
/*
* Attempt to online the device.
*/
if (zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
(newstate == VDEV_STATE_HEALTHY ||
newstate == VDEV_STATE_DEGRADED)) {
zed_log_msg(LOG_INFO,
" zpool_vdev_online: vdev '%s' ('%s') is "
"%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
"HEALTHY" : "DEGRADED");
return;
}
/*
* vdev_id alias rule for using scsi_debug devices (FMA automated
* testing)
*/
if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
is_sd = B_TRUE;
/*
* If the pool doesn't have the autoreplace property set, then use
* vdev online to trigger a FMA fault by posting an ereport.
*/
if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
!(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
"not a blank disk for '%s' ('%s')", fullpath,
physpath);
return;
}
/*
* Convert physical path into its current device node. Rawpath
* needs to be /dev/disk/by-vdev for a scsi_debug device since
* /dev/disk/by-path will not be present.
*/
(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
if (realpath(rawpath, devpath) == NULL && !is_mpath_wholedisk) {
zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
rawpath, strerror(errno));
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
fullpath, libzfs_error_description(g_zfshdl));
return;
}
/* Only autoreplace bad disks */
if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
(vs->vs_state != VDEV_STATE_FAULTED) &&
(vs->vs_state != VDEV_STATE_CANT_OPEN)) {
zed_log_msg(LOG_INFO, " not autoreplacing since disk isn't in "
- "a bad state (currently %d)", vs->vs_state);
+ "a bad state (currently %llu)", vs->vs_state);
return;
}
nvlist_lookup_string(vdev, "new_devid", &new_devid);
if (is_mpath_wholedisk) {
/* Don't label device mapper or multipath disks. */
} else if (!labeled) {
/*
* we're auto-replacing a raw disk, so label it first
*/
char *leafname;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label. Before we can label the disk, we need
* to map the physical string that was matched on to the
* underlying device node.
*
* If any part of this process fails, then do a force online
* to trigger a ZFS fault for the device (and any hot spare
* replacement).
*/
leafname = strrchr(devpath, '/') + 1;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label.
*/
if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
"label '%s' (%s)", leafname,
libzfs_error_description(g_zfshdl));
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
/*
* The disk labeling is asynchronous on Linux. Just record
* this label request and return as there will be another
* disk add event for the partition after the labeling is
* completed.
*/
device = malloc(sizeof (pendingdev_t));
(void) strlcpy(device->pd_physpath, physpath,
sizeof (device->pd_physpath));
list_insert_tail(&g_device_list, device);
zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
leafname, (u_longlong_t)guid);
return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */
} else /* labeled */ {
boolean_t found = B_FALSE;
/*
* match up with request above to label the disk
*/
for (device = list_head(&g_device_list); device != NULL;
device = list_next(&g_device_list, device)) {
if (strcmp(physpath, device->pd_physpath) == 0) {
list_remove(&g_device_list, device);
free(device);
found = B_TRUE;
break;
}
zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
physpath, device->pd_physpath);
}
if (!found) {
/* unexpected partition slice encountered */
zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
fullpath);
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
physpath, (u_longlong_t)guid);
(void) snprintf(devpath, sizeof (devpath), "%s%s",
DEV_BYID_PATH, new_devid);
}
/*
* Construct the root vdev to pass to zpool_vdev_attach(). While adding
* the entire vdev structure is harmless, we construct a reduced set of
* path/physpath/wholedisk to keep it simple.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
return;
}
if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
nvlist_free(nvroot);
return;
}
if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
(physpath != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
(enc_sysfs_path != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)&newvd, 1) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
nvlist_free(newvd);
nvlist_free(nvroot);
return;
}
nvlist_free(newvd);
/*
* Wait for udev to verify the links exist, then auto-replace
* the leaf disk at same physical location.
*/
if (zpool_label_disk_wait(path, 3000) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
"disk %s is missing", path);
nvlist_free(nvroot);
return;
}
/*
* Prefer sequential resilvering when supported (mirrors and dRAID),
* otherwise fallback to a traditional healing resilver.
*/
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
if (ret != 0) {
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
B_TRUE, B_FALSE);
}
zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
fullpath, path, (ret == 0) ? "no errors" :
libzfs_error_description(g_zfshdl));
nvlist_free(nvroot);
}
/*
* Utility functions to find a vdev matching given criteria.
*/
typedef struct dev_data {
const char *dd_compare;
const char *dd_prop;
zfs_process_func_t dd_func;
boolean_t dd_found;
boolean_t dd_islabeled;
uint64_t dd_pool_guid;
uint64_t dd_vdev_guid;
const char *dd_new_devid;
} dev_data_t;
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
dev_data_t *dp = data;
char *path = NULL;
uint_t c, children;
nvlist_t **child;
/*
* First iterate over any children.
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/*
* Iterate over any spares and cache devices
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/* once a vdev was matched and processed there is nothing left to do */
if (dp->dd_found)
return;
/*
* Match by GUID if available otherwise fallback to devid or physical
*/
if (dp->dd_vdev_guid != 0) {
uint64_t guid;
if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid != dp->dd_vdev_guid) {
return;
}
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
dp->dd_found = B_TRUE;
} else if (dp->dd_compare != NULL) {
/*
* NOTE: On Linux there is an event for partition, so unlike
* illumos, substring matching is not required to accommodate
* the partition suffix. An exact match will be present in
* the dp->dd_compare value.
*/
if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
strcmp(dp->dd_compare, path) != 0) {
zed_log_msg(LOG_INFO, " %s: no match (%s != vdev %s)",
__func__, dp->dd_compare, path);
return;
}
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
dp->dd_prop, path);
dp->dd_found = B_TRUE;
/* pass the new devid for use by replacing code */
if (dp->dd_new_devid != NULL) {
(void) nvlist_add_string(nvl, "new_devid",
dp->dd_new_devid);
}
}
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
static void
zfs_enable_ds(void *arg)
{
unavailpool_t *pool = (unavailpool_t *)arg;
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
zpool_close(pool->uap_zhp);
free(pool);
}
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
nvlist_t *config, *nvl;
dev_data_t *dp = data;
uint64_t pool_guid;
unavailpool_t *pool;
zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
/*
* For each vdev in this pool, look for a match to apply dd_func
*/
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
if (dp->dd_pool_guid == 0 ||
(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
(void) nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvl);
zfs_iter_vdev(zhp, nvl, data);
}
} else {
zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
}
/*
* if this pool was originally unavailable,
* then enable its datasets asynchronously
*/
if (g_enumeration_done) {
for (pool = list_head(&g_pool_list); pool != NULL;
pool = list_next(&g_pool_list, pool)) {
if (strcmp(zpool_get_name(zhp),
zpool_get_name(pool->uap_zhp)))
continue;
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
list_remove(&g_pool_list, pool);
(void) tpool_dispatch(g_tpool, zfs_enable_ds,
pool);
break;
}
}
}
zpool_close(zhp);
return (dp->dd_found); /* cease iteration after a match */
}
/*
* Given a physical device location, iterate over all
* (pool, vdev) pairs which correspond to that location.
*/
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = physical;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid; /* used by auto replace code */
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching by-vdev
* path. Normally we shouldn't need this as the comparison would be
* made earlier in the devphys_iter(). For example, if we were replacing
* /dev/disk/by-vdev/L28, normally devphys_iter() would match the
* ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
* of the new disk config. However, we've seen cases where
* ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk. Here's
* an example of a real 2-disk mirror pool where one disk was force
* faulted:
*
* com.delphix:vdev_zap_top: 129
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* So in the case above, the only thing we could compare is the path.
*
* We can do this because we assume by-vdev paths are authoritative as physical
* paths. We could not assume this for normal paths like /dev/sda since the
* physical location /dev/sda points to could change over time.
*/
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = by_vdev_path;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) != 0) {
/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
return (B_FALSE);
}
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching devid.
* On Linux we can match devid directly which is always a whole disk.
*/
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = devid;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_DEVID;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device guid, find any vdevs with a matching guid.
*/
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_func = func;
data.dd_found = B_FALSE;
data.dd_pool_guid = pool_guid;
data.dd_vdev_guid = vdev_guid;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Handle a EC_DEV_ADD.ESC_DISK event.
*
* illumos
* Expects: DEV_PHYS_PATH string in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/dsk/c0t1d0s0' (persistent)
* devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
* phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
*
* linux
* provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/sdc1' (not persistent)
* devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
* phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
*/
static int
zfs_deliver_add(nvlist_t *nvl)
{
char *devpath = NULL, *devid = NULL;
uint64_t pool_guid = 0, vdev_guid = 0;
boolean_t is_slice;
/*
* Expecting a devid string and an optional physical location and guid
*/
if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
return (-1);
}
(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
devid, devpath ? devpath : "NULL", is_slice);
/*
* Iterate over all vdevs looking for a match in the following order:
* 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
* 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
* 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
* 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
* by-vdev paths represent physical paths).
*/
if (devid_iter(devid, zfs_process_add, is_slice))
return (0);
if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
is_slice))
return (0);
if (vdev_guid != 0)
(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
is_slice);
if (devpath != NULL) {
/* Can we match a /dev/disk/by-vdev/ path? */
char by_vdev_path[MAXPATHLEN];
snprintf(by_vdev_path, sizeof (by_vdev_path),
"/dev/disk/by-vdev/%s", devpath);
if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
is_slice))
return (0);
}
return (0);
}
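/*
 * Illustrative sketch: a minimal EC_DEV_ADD payload exercising the matching
 * order above. The devid and phys_path strings are the hypothetical Linux
 * examples from the comment preceding zfs_deliver_add().
 */
#if 0
	nvlist_t *nvl = NULL;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0) {
		(void) nvlist_add_string(nvl, DEV_IDENTIFIER,
		    "ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1");
		(void) nvlist_add_string(nvl, DEV_PHYS_PATH,
		    "pci-0000:04:00.0-sas-0x4433221106000000-lun-0");
		(void) nvlist_add_boolean(nvl, DEV_IS_PART);
		(void) zfs_deliver_add(nvl);
		nvlist_free(nvl);
	}
#endif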
/*
* Called when we receive a VDEV_CHECK event, which indicates a device could not
* be opened during initial pool open, but the autoreplace property was set on
* the pool. In this case, we treat it as if it were an add event.
*/
static int
zfs_deliver_check(nvlist_t *nvl)
{
dev_data_t data = { 0 };
if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
&data.dd_pool_guid) != 0 ||
nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
&data.dd_vdev_guid) != 0 ||
data.dd_vdev_guid == 0)
return (0);
zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
data.dd_pool_guid, data.dd_vdev_guid);
data.dd_func = zfs_process_add;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (0);
}
+/*
+ * Given a path to a vdev, lookup the vdev's physical size from its
+ * config nvlist.
+ *
+ * Returns the vdev's physical size in bytes on success, 0 on error.
+ */
+static uint64_t
+vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
+{
+ nvlist_t *nvl = NULL;
+ boolean_t avail_spare, l2cache, log;
+ vdev_stat_t *vs = NULL;
+ uint_t c;
+
+ nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
+ if (!nvl)
+ return (0);
+
+ verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &c) == 0);
+ if (!vs) {
+ zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
+ vdev_path);
+ return (0);
+ }
+
+ return (vs->vs_pspace);
+}
+
+/*
+ * Given a path to a vdev, lookup if the vdev is a "whole disk" in the
+ * config nvlist. "whole disk" means that ZFS was passed a whole disk
+ * at pool creation time, which it partitioned up and has full control over.
+ * Thus a partition with wholedisk=1 set tells us that zfs created the
+ * partition at creation time. A partition without whole disk set would have
+ * been created externally (like with fdisk) and passed to ZFS.
+ *
+ * Returns the whole disk value (either 0 or 1).
+ */
+static uint64_t
+vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
+{
+ nvlist_t *nvl = NULL;
+ boolean_t avail_spare, l2cache, log;
+ uint64_t wholedisk;
+
+ nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
+ if (!nvl)
+ return (0);
+
+ verify(nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK,
+ &wholedisk) == 0);
+
+ return (wholedisk);
+}
+
+/*
+ * If the device size grew more than 1% then return true.
+ */
+#define DEVICE_GREW(oldsize, newsize) \
+ ((newsize > oldsize) && \
+ ((newsize / (newsize - oldsize)) <= 100))
+
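/*
 * For example (illustrative byte counts): growth from 99 GiB to 100 GiB
 * satisfies DEVICE_GREW, since 100 / (100 - 99) == 100 <= 100, while growth
 * from 995 GiB to 1000 GiB does not (1000 / 5 == 200), as 0.5% is below the
 * 1% threshold.
 */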
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
- char *devname = data;
boolean_t avail_spare, l2cache;
+ nvlist_t *udev_nvl = data;
nvlist_t *tgt;
int error;
+ char *tmp_devname, devname[MAXPATHLEN] = "";
+ uint64_t guid;
+
+ if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
+ sprintf(devname, "%llu", (u_longlong_t)guid);
+ } else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
+ &tmp_devname) == 0) {
+ strlcpy(devname, tmp_devname, MAXPATHLEN);
+ zfs_append_partition(devname, MAXPATHLEN);
+ } else {
+ zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
+ }
+
zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
devname, zpool_get_name(zhp));
if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
&avail_spare, &l2cache, NULL)) != NULL) {
char *path, fullpath[MAXPATHLEN];
uint64_t wholedisk;
error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
if (error) {
zpool_close(zhp);
return (0);
}
error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (error)
wholedisk = 0;
if (wholedisk) {
path = strrchr(path, '/');
if (path != NULL) {
path = zfs_strip_partition(path + 1);
if (path == NULL) {
zpool_close(zhp);
return (0);
}
} else {
zpool_close(zhp);
return (0);
}
(void) strlcpy(fullpath, path, sizeof (fullpath));
free(path);
/*
* We need to reopen the pool associated with this
* device so that the kernel can update the size of
* the expanded device. When expanding there is no
* need to restart the scrub from the beginning.
*/
boolean_t scrub_restart = B_FALSE;
(void) zpool_reopen_one(zhp, &scrub_restart);
} else {
(void) strlcpy(fullpath, path, sizeof (fullpath));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
vdev_state_t newstate;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
- error = zpool_vdev_online(zhp, fullpath, 0,
- &newstate);
- zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
- "setting device '%s' to ONLINE state "
- "in pool '%s': %d", fullpath,
- zpool_get_name(zhp), error);
+ /*
+ * If this disk size has not changed, then
+ * there's no need to do an autoexpand. To
+ * check we look at the disk's size in its
+ * config, and compare it to the disk size
+ * that udev is reporting.
+ */
+ uint64_t udev_size = 0, conf_size = 0,
+ wholedisk = 0, udev_parent_size = 0;
+
+ /*
+ * Get the size of our disk that udev is
+ * reporting.
+ */
+ if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
+ &udev_size) != 0) {
+ udev_size = 0;
+ }
+
+ /*
+ * Get the size of our disk's parent device
+ * from udev (where sda1's parent is sda).
+ */
+ if (nvlist_lookup_uint64(udev_nvl,
+ DEV_PARENT_SIZE, &udev_parent_size) != 0) {
+ udev_parent_size = 0;
+ }
+
+ conf_size = vdev_size_from_config(zhp,
+ fullpath);
+
+ wholedisk = vdev_whole_disk_from_config(zhp,
+ fullpath);
+
+ /*
+ * Only attempt an autoexpand if the vdev size
+ * changed. There are two different cases
+ * to consider.
+ *
+ * 1. wholedisk=1
+ * If you do a 'zpool create' on a whole disk
+ * (like /dev/sda), then zfs will create
+ * partitions on the disk (like /dev/sda1). In
+ * that case, wholedisk=1 will be set in the
+ * partition's nvlist config. So zed will need
+ * to see if your parent device (/dev/sda)
+ * expanded in size, and if so, then attempt
+ * the autoexpand.
+ *
+ * 2. wholedisk=0
+ * If you do a 'zpool create' on an existing
+ * partition, or a device that doesn't allow
+ * partitions, then wholedisk=0, and you will
+ * simply need to check if the device itself
+ * expanded in size.
+ */
+ if (DEVICE_GREW(conf_size, udev_size) ||
+ (wholedisk && DEVICE_GREW(conf_size,
+ udev_parent_size))) {
+ error = zpool_vdev_online(zhp, fullpath,
+ 0, &newstate);
+
+ zed_log_msg(LOG_INFO,
+ "%s: autoexpanding '%s' from %llu"
+ " to %llu bytes in pool '%s': %d",
+ __func__, fullpath, conf_size,
+ MAX(udev_size, udev_parent_size),
+ zpool_get_name(zhp), error);
+ }
}
}
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* This function handles the ESC_DEV_DLE device change event. Use the
* provided vdev guid when looking up a disk or partition, when the guid
* is not present assume the entire disk is owned by ZFS and append the
* expected -part1 partition information then lookup by physical path.
*/
static int
zfs_deliver_dle(nvlist_t *nvl)
{
char *devname, name[MAXPATHLEN];
uint64_t guid;
if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(name, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
strlcpy(name, devname, MAXPATHLEN);
zfs_append_partition(name, MAXPATHLEN);
} else {
zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
}
- if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
+ if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
"found", name);
return (1);
}
return (0);
}
/*
* syseventd daemon module event handler
*
* Handles syseventd daemon zfs device related events:
*
* EC_DEV_ADD.ESC_DISK
* EC_DEV_STATUS.ESC_DEV_DLE
* EC_ZFS.ESC_ZFS_VDEV_CHECK
*
* Note: assumes only one thread active at a time (not thread safe)
*/
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
int ret;
boolean_t is_check = B_FALSE, is_dle = B_FALSE;
if (strcmp(class, EC_DEV_ADD) == 0) {
/*
* We're mainly interested in disk additions, but we also listen
* for new loop devices, to allow for simplified testing.
*/
if (strcmp(subclass, ESC_DISK) != 0 &&
strcmp(subclass, ESC_LOFI) != 0)
return (0);
is_check = B_FALSE;
} else if (strcmp(class, EC_ZFS) == 0 &&
strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
/*
* This event signifies that a device failed to open
* during pool load, but the 'autoreplace' property was
* set, so we should pretend it's just been added.
*/
is_check = B_TRUE;
} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
strcmp(subclass, ESC_DEV_DLE) == 0) {
is_dle = B_TRUE;
} else {
return (0);
}
if (is_dle)
ret = zfs_deliver_dle(nvl);
else if (is_check)
ret = zfs_deliver_check(nvl);
else
ret = zfs_deliver_add(nvl);
return (ret);
}
static void *
zfs_enum_pools(void *arg)
{
(void) arg;
(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
/*
* Linux - instead of using a thread pool, each list entry
* will spawn a thread when an unavailable pool transitions
* to available. zfs_slm_fini will wait for these threads.
*/
g_enumeration_done = B_TRUE;
return (NULL);
}
/*
* called from zed daemon at startup
*
* sent messages from zevents or udev monitor
*
* For now, each agent has its own libzfs instance
*/
int
zfs_slm_init(void)
{
if ((g_zfshdl = libzfs_init()) == NULL)
return (-1);
/*
* collect a list of unavailable pools (asynchronously,
* since this can take a while)
*/
list_create(&g_pool_list, sizeof (struct unavailpool),
offsetof(struct unavailpool, uap_node));
if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
list_destroy(&g_pool_list);
libzfs_fini(g_zfshdl);
return (-1);
}
pthread_setname_np(g_zfs_tid, "enum-pools");
list_create(&g_device_list, sizeof (struct pendingdev),
offsetof(struct pendingdev, pd_node));
return (0);
}
void
zfs_slm_fini(void)
{
unavailpool_t *pool;
pendingdev_t *device;
/* wait for zfs_enum_pools thread to complete */
(void) pthread_join(g_zfs_tid, NULL);
/* destroy the thread pool */
if (g_tpool != NULL) {
tpool_wait(g_tpool);
tpool_destroy(g_tpool);
}
while ((pool = (list_head(&g_pool_list))) != NULL) {
list_remove(&g_pool_list, pool);
zpool_close(pool->uap_zhp);
free(pool);
}
list_destroy(&g_pool_list);
while ((device = (list_head(&g_device_list))) != NULL) {
list_remove(&g_device_list, device);
free(device);
}
list_destroy(&g_device_list);
libzfs_fini(g_zfshdl);
}
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
(void) zfs_slm_deliver_event(class, subclass, nvl);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed_conf.c b/sys/contrib/openzfs/cmd/zed/zed_conf.c
index 9a39d1a80985..29de27c77c34 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_conf.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_conf.c
@@ -1,720 +1,720 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "zed.h"
#include "zed_conf.h"
#include "zed_file.h"
#include "zed_log.h"
#include "zed_strings.h"
/*
* Initialise the configuration with default values.
*/
void
zed_conf_init(struct zed_conf *zcp)
{
memset(zcp, 0, sizeof (*zcp));
/* zcp->zfs_hdl opened in zed_event_init() */
/* zcp->zedlets created in zed_conf_scan_dir() */
zcp->pid_fd = -1; /* opened in zed_conf_write_pid() */
zcp->state_fd = -1; /* opened in zed_conf_open_state() */
zcp->zevent_fd = -1; /* opened in zed_event_init() */
zcp->max_jobs = 16;
zcp->max_zevent_buf_len = 1 << 20;
if (!(zcp->pid_file = strdup(ZED_PID_FILE)) ||
!(zcp->zedlet_dir = strdup(ZED_ZEDLET_DIR)) ||
!(zcp->state_file = strdup(ZED_STATE_FILE)))
zed_log_die("Failed to create conf: %s", strerror(errno));
}
/*
* Destroy the configuration [zcp].
*
* Note: zfs_hdl & zevent_fd are destroyed via zed_event_fini().
*/
void
zed_conf_destroy(struct zed_conf *zcp)
{
if (zcp->state_fd >= 0) {
if (close(zcp->state_fd) < 0)
zed_log_msg(LOG_WARNING,
"Failed to close state file \"%s\": %s",
zcp->state_file, strerror(errno));
zcp->state_fd = -1;
}
if (zcp->pid_file) {
if ((unlink(zcp->pid_file) < 0) && (errno != ENOENT))
zed_log_msg(LOG_WARNING,
"Failed to remove PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
}
if (zcp->pid_fd >= 0) {
if (close(zcp->pid_fd) < 0)
zed_log_msg(LOG_WARNING,
"Failed to close PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
zcp->pid_fd = -1;
}
if (zcp->pid_file) {
free(zcp->pid_file);
zcp->pid_file = NULL;
}
if (zcp->zedlet_dir) {
free(zcp->zedlet_dir);
zcp->zedlet_dir = NULL;
}
if (zcp->state_file) {
free(zcp->state_file);
zcp->state_file = NULL;
}
if (zcp->zedlets) {
zed_strings_destroy(zcp->zedlets);
zcp->zedlets = NULL;
}
}
/*
* Display command-line help and exit.
*
* If [got_err] is 0, output to stdout and exit normally;
* otherwise, output to stderr and exit with a failure status.
*/
static void
_zed_conf_display_help(const char *prog, boolean_t got_err)
{
struct opt { const char *o, *d, *v; };
FILE *fp = got_err ? stderr : stdout;
struct opt *oo;
struct opt iopts[] = {
{ .o = "-h", .d = "Display help" },
{ .o = "-L", .d = "Display license information" },
{ .o = "-V", .d = "Display version information" },
{},
};
struct opt nopts[] = {
{ .o = "-v", .d = "Be verbose" },
{ .o = "-f", .d = "Force daemon to run" },
{ .o = "-F", .d = "Run daemon in the foreground" },
{ .o = "-I",
.d = "Idle daemon until kernel module is (re)loaded" },
{ .o = "-M", .d = "Lock all pages in memory" },
{ .o = "-P", .d = "$PATH for ZED to use (only used by ZTS)" },
{ .o = "-Z", .d = "Zero state file" },
{},
};
struct opt vopts[] = {
{ .o = "-d DIR", .d = "Read enabled ZEDLETs from DIR.",
.v = ZED_ZEDLET_DIR },
{ .o = "-p FILE", .d = "Write daemon's PID to FILE.",
.v = ZED_PID_FILE },
{ .o = "-s FILE", .d = "Write daemon's state to FILE.",
.v = ZED_STATE_FILE },
{ .o = "-j JOBS", .d = "Start at most JOBS at once.",
.v = "16" },
{ .o = "-b LEN", .d = "Cap kernel event buffer at LEN entries.",
.v = "1048576" },
{},
};
fprintf(fp, "Usage: %s [OPTION]...\n", (prog ? prog : "zed"));
fprintf(fp, "\n");
for (oo = iopts; oo->o; ++oo)
fprintf(fp, " %*s %s\n", -8, oo->o, oo->d);
fprintf(fp, "\n");
for (oo = nopts; oo->o; ++oo)
fprintf(fp, " %*s %s\n", -8, oo->o, oo->d);
fprintf(fp, "\n");
for (oo = vopts; oo->o; ++oo)
fprintf(fp, " %*s %s [%s]\n", -8, oo->o, oo->d, oo->v);
fprintf(fp, "\n");
exit(got_err ? EXIT_FAILURE : EXIT_SUCCESS);
}
/*
* Display license information to stdout and exit.
*/
static void
_zed_conf_display_license(void)
{
printf(
"The ZFS Event Daemon (ZED) is distributed under the terms of the\n"
" Common Development and Distribution License (CDDL-1.0)\n"
" <http://opensource.org/licenses/CDDL-1.0>.\n"
"\n"
"Developed at Lawrence Livermore National Laboratory"
" (LLNL-CODE-403049).\n"
"\n");
exit(EXIT_SUCCESS);
}
/*
* Display version information to stdout and exit.
*/
static void
_zed_conf_display_version(void)
{
printf("%s-%s-%s\n",
ZFS_META_NAME, ZFS_META_VERSION, ZFS_META_RELEASE);
exit(EXIT_SUCCESS);
}
/*
* Copy the [path] string to the [resultp] ptr.
* If [path] is not an absolute path, prefix it with the current working dir.
* If [resultp] is non-null, free its existing string before assignment.
*/
static void
_zed_conf_parse_path(char **resultp, const char *path)
{
char buf[PATH_MAX];
assert(resultp != NULL);
assert(path != NULL);
if (*resultp)
free(*resultp);
if (path[0] == '/') {
*resultp = strdup(path);
} else {
if (!getcwd(buf, sizeof (buf)))
zed_log_die("Failed to get current working dir: %s",
strerror(errno));
if (strlcat(buf, "/", sizeof (buf)) >= sizeof (buf) ||
strlcat(buf, path, sizeof (buf)) >= sizeof (buf))
zed_log_die("Failed to copy path: %s",
strerror(ENAMETOOLONG));
*resultp = strdup(buf);
}
if (!*resultp)
zed_log_die("Failed to copy path: %s", strerror(ENOMEM));
}
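/*
 * For example (hypothetical paths): "-p zed.pid" run from /var/run is stored
 * as "/var/run/zed.pid", whereas an absolute "-p /run/zed.pid" is duplicated
 * unchanged.
 */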
/*
* Parse the command-line options into the configuration [zcp].
*/
void
zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
{
const char * const opts = ":hLVd:p:P:s:vfFMZIj:b:";
int opt;
unsigned long raw;
if (!zcp || !argv || !argv[0])
zed_log_die("Failed to parse options: Internal error");
opterr = 0; /* suppress default getopt err msgs */
while ((opt = getopt(argc, argv, opts)) != -1) {
switch (opt) {
case 'h':
_zed_conf_display_help(argv[0], B_FALSE);
break;
case 'L':
_zed_conf_display_license();
break;
case 'V':
_zed_conf_display_version();
break;
case 'd':
_zed_conf_parse_path(&zcp->zedlet_dir, optarg);
break;
case 'I':
zcp->do_idle = 1;
break;
case 'p':
_zed_conf_parse_path(&zcp->pid_file, optarg);
break;
case 'P':
_zed_conf_parse_path(&zcp->path, optarg);
break;
case 's':
_zed_conf_parse_path(&zcp->state_file, optarg);
break;
case 'v':
zcp->do_verbose = 1;
break;
case 'f':
zcp->do_force = 1;
break;
case 'F':
zcp->do_foreground = 1;
break;
case 'M':
zcp->do_memlock = 1;
break;
case 'Z':
zcp->do_zero = 1;
break;
case 'j':
errno = 0;
raw = strtoul(optarg, NULL, 0);
if (errno == ERANGE || raw > INT16_MAX) {
zed_log_die("%lu is too many jobs", raw);
} if (raw == 0) {
zed_log_die("0 jobs makes no sense");
} else {
zcp->max_jobs = raw;
}
break;
case 'b':
errno = 0;
raw = strtoul(optarg, NULL, 0);
if (errno == ERANGE || raw > INT32_MAX) {
zed_log_die("%lu is too large", raw);
} if (raw == 0) {
zcp->max_zevent_buf_len = INT32_MAX;
} else {
zcp->max_zevent_buf_len = raw;
}
break;
case '?':
default:
if (optopt == '?')
_zed_conf_display_help(argv[0], B_FALSE);
fprintf(stderr, "%s: Invalid option '-%c'\n\n",
argv[0], optopt);
_zed_conf_display_help(argv[0], B_TRUE);
break;
}
}
}
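/*
 * For example (hypothetical invocation): "zed -j 4 -b 65536" caps concurrent
 * zedlet jobs at 4 and the kernel zevent buffer at 65536 entries, while
 * "-b 0" selects the INT32_MAX cap.
 */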
/*
* Scan the [zcp] zedlet_dir for files to exec based on the event class.
* Files must be executable by user, but not writable by group or other.
* Dotfiles are ignored.
*
* Return 0 on success with an updated set of zedlets,
* or -1 on error with errno set.
*/
int
zed_conf_scan_dir(struct zed_conf *zcp)
{
zed_strings_t *zedlets;
DIR *dirp;
struct dirent *direntp;
char pathname[PATH_MAX];
struct stat st;
int n;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to scan zedlet dir: %s",
strerror(errno));
return (-1);
}
zedlets = zed_strings_create();
if (!zedlets) {
errno = ENOMEM;
zed_log_msg(LOG_WARNING, "Failed to scan dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
return (-1);
}
dirp = opendir(zcp->zedlet_dir);
if (!dirp) {
int errno_bak = errno;
zed_log_msg(LOG_WARNING, "Failed to open dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
zed_strings_destroy(zedlets);
errno = errno_bak;
return (-1);
}
while ((direntp = readdir(dirp))) {
if (direntp->d_name[0] == '.')
continue;
n = snprintf(pathname, sizeof (pathname),
"%s/%s", zcp->zedlet_dir, direntp->d_name);
if ((n < 0) || (n >= sizeof (pathname))) {
zed_log_msg(LOG_WARNING, "Failed to stat \"%s\": %s",
direntp->d_name, strerror(ENAMETOOLONG));
continue;
}
if (stat(pathname, &st) < 0) {
zed_log_msg(LOG_WARNING, "Failed to stat \"%s\": %s",
pathname, strerror(errno));
continue;
}
if (!S_ISREG(st.st_mode)) {
zed_log_msg(LOG_INFO,
"Ignoring \"%s\": not a regular file",
direntp->d_name);
continue;
}
if ((st.st_uid != 0) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": not owned by root",
direntp->d_name);
continue;
}
if (!(st.st_mode & S_IXUSR)) {
zed_log_msg(LOG_INFO,
"Ignoring \"%s\": not executable by user",
direntp->d_name);
continue;
}
if ((st.st_mode & S_IWGRP) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": writable by group",
direntp->d_name);
continue;
}
if ((st.st_mode & S_IWOTH) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": writable by other",
direntp->d_name);
continue;
}
if (zed_strings_add(zedlets, NULL, direntp->d_name) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to register \"%s\": %s",
direntp->d_name, strerror(errno));
continue;
}
if (zcp->do_verbose)
zed_log_msg(LOG_INFO,
"Registered zedlet \"%s\"", direntp->d_name);
}
if (closedir(dirp) < 0) {
int errno_bak = errno;
zed_log_msg(LOG_WARNING, "Failed to close dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
zed_strings_destroy(zedlets);
errno = errno_bak;
return (-1);
}
if (zcp->zedlets)
zed_strings_destroy(zcp->zedlets);
zcp->zedlets = zedlets;
return (0);
}
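/*
 * For example, a zedlet such as all-syslog.sh in the configured zedlet
 * directory is registered only if it is a regular file, owned by root,
 * executable by its owner, and not writable by group or other; running
 * zed with -f relaxes the ownership and write-bit checks.
 */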
/*
* Write the PID file specified in [zcp].
* Return 0 on success, -1 on error.
*
* This must be called after fork()ing to become a daemon (so the correct PID
* is recorded), but before daemonization is complete and the parent process
* exits (for synchronization with systemd).
*/
int
zed_conf_write_pid(struct zed_conf *zcp)
{
char buf[PATH_MAX];
int n;
char *p;
mode_t mask;
int rv;
if (!zcp || !zcp->pid_file) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to create PID file: %s",
strerror(errno));
return (-1);
}
assert(zcp->pid_fd == -1);
/*
* Create PID file directory if needed.
*/
n = strlcpy(buf, zcp->pid_file, sizeof (buf));
if (n >= sizeof (buf)) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_ERR, "Failed to create PID file: %s",
strerror(errno));
goto err;
}
p = strrchr(buf, '/');
if (p)
*p = '\0';
if ((mkdirp(buf, 0755) < 0) && (errno != EEXIST)) {
zed_log_msg(LOG_ERR, "Failed to create directory \"%s\": %s",
buf, strerror(errno));
goto err;
}
/*
* Obtain PID file lock.
*/
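	/*
	 * Temporarily widen the umask with 022 so the PID file can never be
	 * created group- or other-writable, then restore the caller's umask
	 * right after open(2).
	 */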
mask = umask(0);
umask(mask | 022);
zcp->pid_fd = open(zcp->pid_file, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
umask(mask);
if (zcp->pid_fd < 0) {
zed_log_msg(LOG_ERR, "Failed to open PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
goto err;
}
rv = zed_file_lock(zcp->pid_fd);
if (rv < 0) {
zed_log_msg(LOG_ERR, "Failed to lock PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
goto err;
} else if (rv > 0) {
pid_t pid = zed_file_is_locked(zcp->pid_fd);
if (pid < 0) {
zed_log_msg(LOG_ERR,
"Failed to test lock on PID file \"%s\"",
zcp->pid_file);
} else if (pid > 0) {
zed_log_msg(LOG_ERR,
"Found PID %d bound to PID file \"%s\"",
pid, zcp->pid_file);
} else {
zed_log_msg(LOG_ERR,
"Inconsistent lock state on PID file \"%s\"",
zcp->pid_file);
}
goto err;
}
/*
* Write PID file.
*/
n = snprintf(buf, sizeof (buf), "%d\n", (int)getpid());
if ((n < 0) || (n >= sizeof (buf))) {
errno = ERANGE;
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else if (write(zcp->pid_fd, buf, n) != n) {
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else if (fdatasync(zcp->pid_fd) < 0) {
zed_log_msg(LOG_ERR, "Failed to sync PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else {
return (0);
}
err:
if (zcp->pid_fd >= 0) {
(void) close(zcp->pid_fd);
zcp->pid_fd = -1;
}
return (-1);
}
/*
* Open and lock the [zcp] state_file.
* Return 0 on success, -1 on error.
*
* FIXME: Move state information into kernel.
*/
int
zed_conf_open_state(struct zed_conf *zcp)
{
char dirbuf[PATH_MAX];
int n;
char *p;
int rv;
if (!zcp || !zcp->state_file) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to open state file: %s",
strerror(errno));
return (-1);
}
n = strlcpy(dirbuf, zcp->state_file, sizeof (dirbuf));
if (n >= sizeof (dirbuf)) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_WARNING, "Failed to open state file: %s",
strerror(errno));
return (-1);
}
p = strrchr(dirbuf, '/');
if (p)
*p = '\0';
if ((mkdirp(dirbuf, 0755) < 0) && (errno != EEXIST)) {
zed_log_msg(LOG_WARNING,
"Failed to create directory \"%s\": %s",
dirbuf, strerror(errno));
return (-1);
}
if (zcp->state_fd >= 0) {
if (close(zcp->state_fd) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to close state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
}
if (zcp->do_zero)
(void) unlink(zcp->state_file);
zcp->state_fd = open(zcp->state_file,
O_RDWR | O_CREAT | O_CLOEXEC, 0644);
if (zcp->state_fd < 0) {
zed_log_msg(LOG_WARNING, "Failed to open state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
rv = zed_file_lock(zcp->state_fd);
if (rv < 0) {
zed_log_msg(LOG_WARNING, "Failed to lock state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
if (rv > 0) {
pid_t pid = zed_file_is_locked(zcp->state_fd);
if (pid < 0) {
zed_log_msg(LOG_WARNING,
"Failed to test lock on state file \"%s\"",
zcp->state_file);
} else if (pid > 0) {
zed_log_msg(LOG_WARNING,
"Found PID %d bound to state file \"%s\"",
pid, zcp->state_file);
} else {
zed_log_msg(LOG_WARNING,
"Inconsistent lock state on state file \"%s\"",
zcp->state_file);
}
return (-1);
}
return (0);
}
/*
* Read the opened [zcp] state_file to obtain the eid & etime of the last event
* processed. Write the state from the last event to the [eidp] & [etime] args
* passed by reference. Note that etime[] is an array of size 2.
* Return 0 on success, -1 on error.
*/
int
zed_conf_read_state(struct zed_conf *zcp, uint64_t *eidp, int64_t etime[])
{
ssize_t len;
struct iovec iov[3];
ssize_t n;
if (!zcp || !eidp || !etime) {
errno = EINVAL;
zed_log_msg(LOG_ERR,
"Failed to read state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
return (-1);
}
len = 0;
iov[0].iov_base = eidp;
len += iov[0].iov_len = sizeof (*eidp);
iov[1].iov_base = &etime[0];
len += iov[1].iov_len = sizeof (etime[0]);
iov[2].iov_base = &etime[1];
len += iov[2].iov_len = sizeof (etime[1]);
n = readv(zcp->state_fd, iov, 3);
if (n == 0) {
*eidp = 0;
} else if (n < 0) {
zed_log_msg(LOG_WARNING,
"Failed to read state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
} else if (n != len) {
errno = EIO;
zed_log_msg(LOG_WARNING,
- "Failed to read state file \"%s\": Read %d of %d bytes",
+ "Failed to read state file \"%s\": Read %zd of %zd bytes",
zcp->state_file, n, len);
return (-1);
}
return (0);
}
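/*
 * The state file holds a single fixed-size record read and written via
 * readv(2)/writev(2): a uint64_t eid followed by etime[0] and etime[1]
 * (both int64_t), 24 bytes in total.
 */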
/*
* Write the [eid] & [etime] of the last processed event to the opened
* [zcp] state_file. Note that etime[] is an array of size 2.
* Return 0 on success, -1 on error.
*/
int
zed_conf_write_state(struct zed_conf *zcp, uint64_t eid, int64_t etime[])
{
ssize_t len;
struct iovec iov[3];
ssize_t n;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR,
"Failed to write state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
return (-1);
}
len = 0;
iov[0].iov_base = &eid;
len += iov[0].iov_len = sizeof (eid);
iov[1].iov_base = &etime[0];
len += iov[1].iov_len = sizeof (etime[0]);
iov[2].iov_base = &etime[1];
len += iov[2].iov_len = sizeof (etime[1]);
n = writev(zcp->state_fd, iov, 3);
if (n < 0) {
zed_log_msg(LOG_WARNING,
"Failed to write state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
if (n != len) {
errno = EIO;
zed_log_msg(LOG_WARNING,
- "Failed to write state file \"%s\": Wrote %d of %d bytes",
+ "Failed to write state file \"%s\": Wrote %zd of %zd bytes",
zcp->state_file, n, len);
return (-1);
}
if (fdatasync(zcp->state_fd) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to sync state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
return (0);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed_disk_event.c b/sys/contrib/openzfs/cmd/zed/zed_disk_event.c
index 8845c5b2d009..db89ecc907bb 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_disk_event.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_disk_event.c
@@ -1,453 +1,469 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2017, Intel Corporation.
*/
#ifdef HAVE_LIBUDEV
#include <errno.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libudev.h>
#include <libzfs.h>
#include <libzutil.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include "zed_log.h"
#include "zed_disk_event.h"
#include "agents/zfs_agents.h"
/*
* Portions of ZED need to see disk events for disks belonging to ZFS pools.
* A libudev monitor is established to monitor block device actions and pass
* them on to internal ZED logic modules. Initially, zfs_mod.c is the only
* consumer and is the Linux equivalent for the illumos syseventd ZFS SLM
* module responsible for handling disk events for ZFS.
*/
pthread_t g_mon_tid;
struct udev *g_udev;
struct udev_monitor *g_mon;
#define DEV_BYID_PATH "/dev/disk/by-id/"
/* 64MB is minimum usable disk for ZFS */
-#define MINIMUM_SECTORS 131072
+#define MINIMUM_SECTORS 131072ULL
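/* i.e. 131072 * 512-byte udev sectors = 67108864 bytes = 64 MiB */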
/*
* Post disk event to SLM module
*
* occurs in the context of monitor thread
*/
static void
zed_udev_event(const char *class, const char *subclass, nvlist_t *nvl)
{
char *strval;
uint64_t numval;
zed_log_msg(LOG_INFO, "zed_disk_event:");
zed_log_msg(LOG_INFO, "\tclass: %s", class);
zed_log_msg(LOG_INFO, "\tsubclass: %s", subclass);
if (nvlist_lookup_string(nvl, DEV_NAME, &strval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %s", DEV_NAME, strval);
if (nvlist_lookup_string(nvl, DEV_PATH, &strval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %s", DEV_PATH, strval);
if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &strval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %s", DEV_IDENTIFIER, strval);
if (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0)
zed_log_msg(LOG_INFO, "\t%s: B_TRUE", DEV_IS_PART);
if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &strval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %s", DEV_PHYS_PATH, strval);
if (nvlist_lookup_uint64(nvl, DEV_SIZE, &numval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %llu", DEV_SIZE, numval);
+ if (nvlist_lookup_uint64(nvl, DEV_PARENT_SIZE, &numval) == 0)
+ zed_log_msg(LOG_INFO, "\t%s: %llu", DEV_PARENT_SIZE, numval);
if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &numval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %llu", ZFS_EV_POOL_GUID, numval);
if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &numval) == 0)
zed_log_msg(LOG_INFO, "\t%s: %llu", ZFS_EV_VDEV_GUID, numval);
(void) zfs_agent_post_event(class, subclass, nvl);
}
/*
* dev_event_nvlist: place event schema into an nv pair list
*
* NAME VALUE (example)
* -------------- --------------------------------------------------------
* DEV_NAME /dev/sdl
* DEV_PATH /devices/pci0000:00/0000:00:03.0/0000:04:00.0/host0/...
* DEV_IDENTIFIER ata-Hitachi_HTS725050A9A362_100601PCG420VLJ37DMC
* DEV_PHYS_PATH pci-0000:04:00.0-sas-0x4433221101000000-lun-0
* DEV_IS_PART ---
* DEV_SIZE 500107862016
* ZFS_EV_POOL_GUID 17523635698032189180
* ZFS_EV_VDEV_GUID 14663607734290803088
*/
static nvlist_t *
dev_event_nvlist(struct udev_device *dev)
{
nvlist_t *nvl;
char strval[128];
const char *value, *path;
uint64_t guid;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (zfs_device_get_devid(dev, strval, sizeof (strval)) == 0)
(void) nvlist_add_string(nvl, DEV_IDENTIFIER, strval);
if (zfs_device_get_physical(dev, strval, sizeof (strval)) == 0)
(void) nvlist_add_string(nvl, DEV_PHYS_PATH, strval);
if ((path = udev_device_get_devnode(dev)) != NULL)
(void) nvlist_add_string(nvl, DEV_NAME, path);
if ((value = udev_device_get_devpath(dev)) != NULL)
(void) nvlist_add_string(nvl, DEV_PATH, value);
value = udev_device_get_devtype(dev);
if ((value != NULL && strcmp("partition", value) == 0) ||
(udev_device_get_property_value(dev, "ID_PART_ENTRY_NUMBER")
!= NULL)) {
(void) nvlist_add_boolean(nvl, DEV_IS_PART);
}
if ((value = udev_device_get_sysattr_value(dev, "size")) != NULL) {
uint64_t numval = DEV_BSIZE;
numval *= strtoull(value, NULL, 10);
(void) nvlist_add_uint64(nvl, DEV_SIZE, numval);
+
+ /*
+ * If the device has a parent, then get the parent block
+ * device's size as well. For example, /dev/sda1's parent
+ * is /dev/sda.
+ */
+ struct udev_device *parent_dev = udev_device_get_parent(dev);
+ if ((value = udev_device_get_sysattr_value(parent_dev, "size"))
+ != NULL) {
+ uint64_t numval = DEV_BSIZE;
+
+ numval *= strtoull(value, NULL, 10);
+ (void) nvlist_add_uint64(nvl, DEV_PARENT_SIZE, numval);
+ }
}
/*
* Grab the pool and vdev guids from blkid cache
*/
value = udev_device_get_property_value(dev, "ID_FS_UUID");
if (value != NULL && (guid = strtoull(value, NULL, 10)) != 0)
(void) nvlist_add_uint64(nvl, ZFS_EV_POOL_GUID, guid);
value = udev_device_get_property_value(dev, "ID_FS_UUID_SUB");
if (value != NULL && (guid = strtoull(value, NULL, 10)) != 0)
(void) nvlist_add_uint64(nvl, ZFS_EV_VDEV_GUID, guid);
/*
* Either a vdev guid or a devid must be present for matching
*/
if (!nvlist_exists(nvl, DEV_IDENTIFIER) &&
!nvlist_exists(nvl, ZFS_EV_VDEV_GUID)) {
nvlist_free(nvl);
return (NULL);
}
return (nvl);
}
/*
* Listen for block device uevents
*/
static void *
zed_udev_monitor(void *arg)
{
struct udev_monitor *mon = arg;
char *tmp, *tmp2;
zed_log_msg(LOG_INFO, "Waiting for new udev disk events...");
while (1) {
struct udev_device *dev;
const char *action, *type, *part, *sectors;
const char *bus, *uuid, *devpath;
const char *class, *subclass;
nvlist_t *nvl;
boolean_t is_zfs = B_FALSE;
/* allow a cancellation while blocked (recvmsg) */
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
/* blocks at recvmsg until an event occurs */
if ((dev = udev_monitor_receive_device(mon)) == NULL) {
zed_log_msg(LOG_WARNING, "zed_udev_monitor: receive "
"device error %d", errno);
continue;
}
/* allow all steps to complete before a cancellation */
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
/*
* Strongly typed device is the preferred filter
*/
type = udev_device_get_property_value(dev, "ID_FS_TYPE");
if (type != NULL && type[0] != '\0') {
if (strcmp(type, "zfs_member") == 0) {
is_zfs = B_TRUE;
} else {
/* not ours, so skip */
zed_log_msg(LOG_INFO, "zed_udev_monitor: skip "
"%s (in use by %s)",
udev_device_get_devnode(dev), type);
udev_device_unref(dev);
continue;
}
}
/*
* if this is a disk and it is partitioned, then the
* zfs label will reside in a DEVTYPE=partition and
* we can skip passing this event
*
* Special case: Blank disks are sometimes reported with
* an erroneous 'atari' partition, and should not be
* excluded from being used as an autoreplace disk:
*
* https://github.com/openzfs/zfs/issues/13497
*/
type = udev_device_get_property_value(dev, "DEVTYPE");
part = udev_device_get_property_value(dev,
"ID_PART_TABLE_TYPE");
if (type != NULL && type[0] != '\0' &&
strcmp(type, "disk") == 0 &&
part != NULL && part[0] != '\0') {
const char *devname =
udev_device_get_property_value(dev, "DEVNAME");
if (strcmp(part, "atari") == 0) {
zed_log_msg(LOG_INFO,
"%s: %s is reporting an atari partition, "
"but we're going to assume it's a false "
"positive and still use it (issue #13497)",
__func__, devname);
} else {
zed_log_msg(LOG_INFO,
"%s: skip %s since it has a %s partition "
"already", __func__, devname, part);
/* skip and wait for partition event */
udev_device_unref(dev);
continue;
}
}
/*
* ignore small partitions
*/
sectors = udev_device_get_property_value(dev,
"ID_PART_ENTRY_SIZE");
if (sectors == NULL)
sectors = udev_device_get_sysattr_value(dev, "size");
if (sectors != NULL &&
strtoull(sectors, NULL, 10) < MINIMUM_SECTORS) {
zed_log_msg(LOG_INFO,
"%s: %s sectors %s < %llu (minimum)",
__func__,
udev_device_get_property_value(dev, "DEVNAME"),
sectors, MINIMUM_SECTORS);
udev_device_unref(dev);
continue;
}
/*
* If the blkid probe didn't find ZFS, then a persistent
* device id string is required in the message schema
* for matching with vdevs. Preflight here for expected
* udev information.
*
* Special case:
* NVMe devices don't have ID_BUS set (at least on RHEL 7-8),
* but they are valid for autoreplace. Add a special case for
* them by searching for "/nvme/" in the udev DEVPATH:
*
* DEVPATH=/devices/pci0000:00/0000:00:1e.0/nvme/nvme2/nvme2n1
*/
bus = udev_device_get_property_value(dev, "ID_BUS");
uuid = udev_device_get_property_value(dev, "DM_UUID");
devpath = udev_device_get_devpath(dev);
if (!is_zfs && (bus == NULL && uuid == NULL &&
strstr(devpath, "/nvme/") == NULL)) {
zed_log_msg(LOG_INFO, "zed_udev_monitor: %s no devid "
"source", udev_device_get_devnode(dev));
udev_device_unref(dev);
continue;
}
action = udev_device_get_action(dev);
if (strcmp(action, "add") == 0) {
class = EC_DEV_ADD;
subclass = ESC_DISK;
} else if (strcmp(action, "remove") == 0) {
class = EC_DEV_REMOVE;
subclass = ESC_DISK;
} else if (strcmp(action, "change") == 0) {
class = EC_DEV_STATUS;
subclass = ESC_DEV_DLE;
} else {
zed_log_msg(LOG_WARNING, "zed_udev_monitor: %s unknown",
action);
udev_device_unref(dev);
continue;
}
/*
* Special case an EC_DEV_ADD for multipath devices
*
* When a multipath device is created, udev reports the
* following:
*
* 1. "add" event of the dm device for the multipath device
* (like /dev/dm-3).
* 2. "change" event to create the actual multipath device
* symlink (like /dev/mapper/mpatha). The event also
* passes back the relevant DM vars we care about, like
* DM_UUID.
* 3. Another "change" event identical to #2 (that we ignore).
*
* To get the behavior we want, we treat the "change" event
* in #2 as an "add" event; as if "/dev/mapper/mpatha" was
* a new disk being added.
*/
if (strcmp(class, EC_DEV_STATUS) == 0 &&
udev_device_get_property_value(dev, "DM_UUID") &&
udev_device_get_property_value(dev, "MPATH_SBIN_PATH")) {
tmp = (char *)udev_device_get_devnode(dev);
tmp2 = zfs_get_underlying_path(tmp);
if (tmp && tmp2 && (strcmp(tmp, tmp2) != 0)) {
/*
* We have a real underlying device, which
* means that this multipath "change" event is
* an "add" event.
*
* If the multipath device and the underlying
* dev are the same name (i.e. /dev/dm-5), then
* there is no real underlying disk for this
* multipath device, and so this "change" event
* really is a multipath removal.
*/
class = EC_DEV_ADD;
subclass = ESC_DISK;
} else {
tmp = (char *)
udev_device_get_property_value(dev,
"DM_NR_VALID_PATHS");
/* treat as a multipath remove */
if (tmp != NULL && strcmp(tmp, "0") == 0) {
class = EC_DEV_REMOVE;
subclass = ESC_DISK;
}
}
free(tmp2);
}
/*
* Special case an EC_DEV_ADD for scsi_debug devices
*
* These devices require a udevadm trigger command after
* creation in order to register the vdev_id scsidebug alias
* rule (adds a persistent path (phys_path) used for fault
* management automated tests in the ZFS test suite).
*
* After the udevadm trigger command, the event registers as a "change"
* event but needs to instead be handled as another "add" event
* to allow for disk labeling and partitioning to occur.
*/
if (strcmp(class, EC_DEV_STATUS) == 0 &&
udev_device_get_property_value(dev, "ID_VDEV") &&
udev_device_get_property_value(dev, "ID_MODEL")) {
const char *id_model, *id_model_sd = "scsi_debug";
id_model = udev_device_get_property_value(dev,
"ID_MODEL");
if (strcmp(id_model, id_model_sd) == 0) {
class = EC_DEV_ADD;
subclass = ESC_DISK;
}
}
if ((nvl = dev_event_nvlist(dev)) != NULL) {
zed_udev_event(class, subclass, nvl);
nvlist_free(nvl);
}
udev_device_unref(dev);
}
return (NULL);
}
int
zed_disk_event_init(void)
{
int fd, fflags;
if ((g_udev = udev_new()) == NULL) {
zed_log_msg(LOG_WARNING, "udev_new failed (%d)", errno);
return (-1);
}
/* Set up a udev monitor for block devices */
g_mon = udev_monitor_new_from_netlink(g_udev, "udev");
udev_monitor_filter_add_match_subsystem_devtype(g_mon, "block", "disk");
udev_monitor_filter_add_match_subsystem_devtype(g_mon, "block",
"partition");
udev_monitor_enable_receiving(g_mon);
/* Make sure monitoring socket is blocking */
fd = udev_monitor_get_fd(g_mon);
if ((fflags = fcntl(fd, F_GETFL)) & O_NONBLOCK)
(void) fcntl(fd, F_SETFL, fflags & ~O_NONBLOCK);
/* spawn a thread to monitor events */
if (pthread_create(&g_mon_tid, NULL, zed_udev_monitor, g_mon) != 0) {
udev_monitor_unref(g_mon);
udev_unref(g_udev);
zed_log_msg(LOG_WARNING, "pthread_create failed");
return (-1);
}
pthread_setname_np(g_mon_tid, "udev monitor");
zed_log_msg(LOG_INFO, "zed_disk_event_init");
return (0);
}
void
zed_disk_event_fini(void)
{
/* cancel monitor thread at recvmsg() */
(void) pthread_cancel(g_mon_tid);
(void) pthread_join(g_mon_tid, NULL);
/* cleanup udev resources */
udev_monitor_unref(g_mon);
udev_unref(g_udev);
zed_log_msg(LOG_INFO, "zed_disk_event_fini");
}
#else
#include "zed_disk_event.h"
int
zed_disk_event_init(void)
{
return (0);
}
void
zed_disk_event_fini(void)
{
}
#endif /* HAVE_LIBUDEV */
diff --git a/sys/contrib/openzfs/cmd/zed/zed_exec.c b/sys/contrib/openzfs/cmd/zed/zed_exec.c
index 369c4b6950c5..51c292d41ccc 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_exec.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_exec.c
@@ -1,373 +1,373 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <sys/avl.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include "zed_exec.h"
#include "zed_log.h"
#include "zed_strings.h"
#define ZEVENT_FILENO 3
struct launched_process_node {
avl_node_t node;
pid_t pid;
uint64_t eid;
char *name;
};
static int
_launched_process_node_compare(const void *x1, const void *x2)
{
pid_t p1;
pid_t p2;
assert(x1 != NULL);
assert(x2 != NULL);
p1 = ((const struct launched_process_node *) x1)->pid;
p2 = ((const struct launched_process_node *) x2)->pid;
if (p1 < p2)
return (-1);
else if (p1 == p2)
return (0);
else
return (1);
}
static pthread_t _reap_children_tid = (pthread_t)-1;
static volatile boolean_t _reap_children_stop;
static avl_tree_t _launched_processes;
static pthread_mutex_t _launched_processes_lock = PTHREAD_MUTEX_INITIALIZER;
static int16_t _launched_processes_limit;
/*
* Create an environment string array for passing to execve() using the
* NAME=VALUE strings in container [zsp].
* Return a newly-allocated environment, or NULL on error.
*/
static char **
_zed_exec_create_env(zed_strings_t *zsp)
{
int num_ptrs;
int buflen;
char *buf;
char **pp;
char *p;
const char *q;
int i;
int len;
num_ptrs = zed_strings_count(zsp) + 1;
buflen = num_ptrs * sizeof (char *);
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp))
buflen += strlen(q) + 1;
buf = calloc(1, buflen);
if (!buf)
return (NULL);
pp = (char **)buf;
p = buf + (num_ptrs * sizeof (char *));
i = 0;
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp)) {
pp[i] = p;
len = strlen(q) + 1;
memcpy(p, q, len);
p += len;
i++;
}
pp[i] = NULL;
assert(buf + buflen == p);
return ((char **)buf);
}
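/*
 * Editor's sketch (illustration, not upstream code): _zed_exec_create_env()
 * above packs the char * pointer array and the NAME=VALUE string data into a
 * single calloc'd buffer, so the whole environment can be released with one
 * free(). A stand-alone demonstration of that layout follows; pack_env() and
 * the example variable names are made up for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **
pack_env(const char *const *src, int n)
{
	size_t buflen = (n + 1) * sizeof (char *);
	char *buf, *p;
	char **pp;
	int i;

	for (i = 0; i < n; i++)
		buflen += strlen(src[i]) + 1;
	if ((buf = calloc(1, buflen)) == NULL)
		return (NULL);
	pp = (char **)buf;
	p = buf + (n + 1) * sizeof (char *);	/* strings follow the pointers */
	for (i = 0; i < n; i++) {
		size_t len = strlen(src[i]) + 1;
		pp[i] = p;
		memcpy(p, src[i], len);
		p += len;
	}
	pp[n] = NULL;				/* execve()-style terminator */
	return (pp);
}

int
main(void)
{
	const char *const vars[] = { "ZEVENT_EID=42", "ZEVENT_CLASS=example" };
	char **env = pack_env(vars, 2);

	for (int i = 0; env != NULL && env[i] != NULL; i++)
		puts(env[i]);
	free(env);	/* one allocation frees both pointers and strings */
	return (0);
}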
/*
* Fork a child process to handle event [eid]. The program [prog]
* in directory [dir] is executed with the environment [env].
*
* The file descriptor [zfd] is the zevent_fd used to track the
* current cursor location within the zevent nvlist.
*/
static void
_zed_exec_fork_child(uint64_t eid, const char *dir, const char *prog,
char *env[], int zfd, boolean_t in_foreground)
{
char path[PATH_MAX];
int n;
pid_t pid;
int fd;
struct launched_process_node *node;
sigset_t mask;
struct timespec launch_timeout =
{ .tv_sec = 0, .tv_nsec = 200 * 1000 * 1000, };
assert(dir != NULL);
assert(prog != NULL);
assert(env != NULL);
assert(zfd >= 0);
while (__atomic_load_n(&_launched_processes_limit,
__ATOMIC_SEQ_CST) <= 0)
(void) nanosleep(&launch_timeout, NULL);
n = snprintf(path, sizeof (path), "%s/%s", dir, prog);
if ((n < 0) || (n >= sizeof (path))) {
zed_log_msg(LOG_WARNING,
"Failed to fork \"%s\" for eid=%llu: %s",
prog, eid, strerror(ENAMETOOLONG));
return;
}
(void) pthread_mutex_lock(&_launched_processes_lock);
pid = fork();
if (pid < 0) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
zed_log_msg(LOG_WARNING,
"Failed to fork \"%s\" for eid=%llu: %s",
prog, eid, strerror(errno));
return;
} else if (pid == 0) {
(void) sigemptyset(&mask);
(void) sigprocmask(SIG_SETMASK, &mask, NULL);
(void) umask(022);
if (in_foreground && /* we're already devnulled if daemonised */
(fd = open("/dev/null", O_RDWR | O_CLOEXEC)) != -1) {
(void) dup2(fd, STDIN_FILENO);
(void) dup2(fd, STDOUT_FILENO);
(void) dup2(fd, STDERR_FILENO);
}
(void) dup2(zfd, ZEVENT_FILENO);
execle(path, prog, NULL, env);
_exit(127);
}
/* parent process */
node = calloc(1, sizeof (*node));
if (node) {
node->pid = pid;
node->eid = eid;
node->name = strdup(prog);
avl_add(&_launched_processes, node);
}
(void) pthread_mutex_unlock(&_launched_processes_lock);
__atomic_sub_fetch(&_launched_processes_limit, 1, __ATOMIC_SEQ_CST);
zed_log_msg(LOG_INFO, "Invoking \"%s\" eid=%llu pid=%d",
prog, eid, pid);
}
static void
_nop(int sig)
{
(void) sig;
}
static void *
_reap_children(void *arg)
{
(void) arg;
struct launched_process_node node, *pnode;
pid_t pid;
int status;
struct rusage usage;
struct sigaction sa = {};
(void) sigfillset(&sa.sa_mask);
(void) sigdelset(&sa.sa_mask, SIGCHLD);
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
(void) sigemptyset(&sa.sa_mask);
sa.sa_handler = _nop;
sa.sa_flags = SA_NOCLDSTOP;
(void) sigaction(SIGCHLD, &sa, NULL);
for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
(void) pthread_mutex_lock(&_launched_processes_lock);
pid = wait4(0, &status, WNOHANG, &usage);
if (pid == 0 || pid == (pid_t)-1) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
if (pid == 0 || errno == ECHILD)
pause();
else if (errno != EINTR)
zed_log_msg(LOG_WARNING,
"Failed to wait for children: %s",
strerror(errno));
} else {
memset(&node, 0, sizeof (node));
node.pid = pid;
pnode = avl_find(&_launched_processes, &node, NULL);
if (pnode) {
memcpy(&node, pnode, sizeof (node));
avl_remove(&_launched_processes, pnode);
free(pnode);
}
(void) pthread_mutex_unlock(&_launched_processes_lock);
__atomic_add_fetch(&_launched_processes_limit, 1,
__ATOMIC_SEQ_CST);
usage.ru_utime.tv_sec += usage.ru_stime.tv_sec;
usage.ru_utime.tv_usec += usage.ru_stime.tv_usec;
usage.ru_utime.tv_sec +=
usage.ru_utime.tv_usec / (1000 * 1000);
usage.ru_utime.tv_usec %= 1000 * 1000;
if (WIFEXITED(status)) {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us exit=%d",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us sig=%d/%s",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
WTERMSIG(status),
strsignal(WTERMSIG(status)));
} else {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us status=0x%X",
- node.name, node.eid,
+ node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
(unsigned int) status);
}
free(node.name);
}
}
return (NULL);
}
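/*
 * Editor's sketch (illustration only): _reap_children() above folds ru_stime
 * into ru_utime and renormalizes the microsecond field before logging the
 * child's CPU time. The same struct timeval arithmetic on example values:
 */
#include <stdio.h>
#include <sys/time.h>

int
main(void)
{
	struct timeval user = { .tv_sec = 1, .tv_usec = 900000 };
	struct timeval sys = { .tv_sec = 0, .tv_usec = 300000 };

	user.tv_sec += sys.tv_sec;
	user.tv_usec += sys.tv_usec;
	user.tv_sec += user.tv_usec / (1000 * 1000);	/* carry whole seconds */
	user.tv_usec %= 1000 * 1000;
	/* prints "2.200000s" */
	printf("%lld.%06lds\n", (long long)user.tv_sec, (long)user.tv_usec);
	return (0);
}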
void
zed_exec_fini(void)
{
struct launched_process_node *node;
void *ck = NULL;
if (_reap_children_tid == (pthread_t)-1)
return;
_reap_children_stop = B_TRUE;
(void) pthread_kill(_reap_children_tid, SIGCHLD);
(void) pthread_join(_reap_children_tid, NULL);
while ((node = avl_destroy_nodes(&_launched_processes, &ck)) != NULL) {
free(node->name);
free(node);
}
avl_destroy(&_launched_processes);
(void) pthread_mutex_destroy(&_launched_processes_lock);
(void) pthread_mutex_init(&_launched_processes_lock, NULL);
_reap_children_tid = (pthread_t)-1;
}
/*
* Process the event [eid] by synchronously invoking all zedlets with a
* matching class prefix.
*
* Each executable in [zcp->zedlets] from the directory [zcp->zedlet_dir]
* is matched against the event's [class], [subclass], and the "all" class
* (which matches all events).
* Every zedlet with a matching class prefix is invoked.
* The NAME=VALUE strings in [envs] will be passed to the zedlet as
* environment variables.
*
* The file descriptor [zcp->zevent_fd] is the zevent_fd used to track the
* current cursor location within the zevent nvlist.
*
* Return 0 on success, -1 on error.
*/
int
zed_exec_process(uint64_t eid, const char *class, const char *subclass,
struct zed_conf *zcp, zed_strings_t *envs)
{
const char *class_strings[4];
const char *allclass = "all";
const char **csp;
const char *z;
char **e;
int n;
if (!zcp->zedlet_dir || !zcp->zedlets || !envs || zcp->zevent_fd < 0)
return (-1);
if (_reap_children_tid == (pthread_t)-1) {
_launched_processes_limit = zcp->max_jobs;
if (pthread_create(&_reap_children_tid, NULL,
_reap_children, NULL) != 0)
return (-1);
pthread_setname_np(_reap_children_tid, "reap ZEDLETs");
avl_create(&_launched_processes, _launched_process_node_compare,
sizeof (struct launched_process_node),
offsetof(struct launched_process_node, node));
}
csp = class_strings;
if (class)
*csp++ = class;
if (subclass)
*csp++ = subclass;
if (allclass)
*csp++ = allclass;
*csp = NULL;
e = _zed_exec_create_env(envs);
for (z = zed_strings_first(zcp->zedlets); z;
z = zed_strings_next(zcp->zedlets)) {
for (csp = class_strings; *csp; csp++) {
n = strlen(*csp);
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
_zed_exec_fork_child(eid, zcp->zedlet_dir,
z, e, zcp->zevent_fd, zcp->do_foreground);
}
}
free(e);
return (0);
}
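/*
 * Editor's sketch (not upstream code): zed_exec_process() above selects
 * zedlets by a class-prefix test: the script name must start with the class,
 * subclass, or "all" string, and the character after the prefix must not be
 * alphabetic (typically '-' or '.'). zedlet_matches() and the example script
 * names below are illustrative assumptions, not part of the source.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int
zedlet_matches(const char *zedlet, const char *class)
{
	size_t n = strlen(class);

	return (strncmp(zedlet, class, n) == 0 &&
	    !isalpha((unsigned char)zedlet[n]));
}

int
main(void)
{
	/* matches: prefix "all" followed by '-' */
	printf("%d\n", zedlet_matches("all-syslog.sh", "all"));
	/* does not match: "allocator..." continues with a letter */
	printf("%d\n", zedlet_matches("allocator-report.sh", "all"));
	return (0);
}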
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index f1d686753c25..008f1bea0ec9 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -1,8817 +1,8820 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2012 Milan Jurik. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
*/
#include <assert.h>
#include <ctype.h>
#include <sys/debug.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <libnvpair.h>
#include <locale.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <zone.h>
#include <grp.h>
#include <pwd.h>
#include <umem.h>
#include <pthread.h>
#include <signal.h>
#include <sys/list.h>
#include <sys/mkdev.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/fs/zfs.h>
#include <sys/systeminfo.h>
#include <sys/types.h>
#include <time.h>
#include <sys/zfs_project.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <zfs_prop.h>
#include <zfs_deleg.h>
#include <libzutil.h>
#ifdef HAVE_IDMAP
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include "zfs_iter.h"
#include "zfs_util.h"
#include "zfs_comutil.h"
#include "zfs_projectutil.h"
libzfs_handle_t *g_zfs;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static int zfs_do_clone(int argc, char **argv);
static int zfs_do_create(int argc, char **argv);
static int zfs_do_destroy(int argc, char **argv);
static int zfs_do_get(int argc, char **argv);
static int zfs_do_inherit(int argc, char **argv);
static int zfs_do_list(int argc, char **argv);
static int zfs_do_mount(int argc, char **argv);
static int zfs_do_rename(int argc, char **argv);
static int zfs_do_rollback(int argc, char **argv);
static int zfs_do_set(int argc, char **argv);
static int zfs_do_upgrade(int argc, char **argv);
static int zfs_do_snapshot(int argc, char **argv);
static int zfs_do_unmount(int argc, char **argv);
static int zfs_do_share(int argc, char **argv);
static int zfs_do_unshare(int argc, char **argv);
static int zfs_do_send(int argc, char **argv);
static int zfs_do_receive(int argc, char **argv);
static int zfs_do_promote(int argc, char **argv);
static int zfs_do_userspace(int argc, char **argv);
static int zfs_do_allow(int argc, char **argv);
static int zfs_do_unallow(int argc, char **argv);
static int zfs_do_hold(int argc, char **argv);
static int zfs_do_holds(int argc, char **argv);
static int zfs_do_release(int argc, char **argv);
static int zfs_do_diff(int argc, char **argv);
static int zfs_do_bookmark(int argc, char **argv);
static int zfs_do_channel_program(int argc, char **argv);
static int zfs_do_load_key(int argc, char **argv);
static int zfs_do_unload_key(int argc, char **argv);
static int zfs_do_change_key(int argc, char **argv);
static int zfs_do_project(int argc, char **argv);
static int zfs_do_version(int argc, char **argv);
static int zfs_do_redact(int argc, char **argv);
static int zfs_do_wait(int argc, char **argv);
#ifdef __FreeBSD__
static int zfs_do_jail(int argc, char **argv);
static int zfs_do_unjail(int argc, char **argv);
#endif
#ifdef __linux__
static int zfs_do_zone(int argc, char **argv);
static int zfs_do_unzone(int argc, char **argv);
#endif
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_CLONE,
HELP_CREATE,
HELP_DESTROY,
HELP_GET,
HELP_INHERIT,
HELP_UPGRADE,
HELP_LIST,
HELP_MOUNT,
HELP_PROMOTE,
HELP_RECEIVE,
HELP_RENAME,
HELP_ROLLBACK,
HELP_SEND,
HELP_SET,
HELP_SHARE,
HELP_SNAPSHOT,
HELP_UNMOUNT,
HELP_UNSHARE,
HELP_ALLOW,
HELP_UNALLOW,
HELP_USERSPACE,
HELP_GROUPSPACE,
HELP_PROJECTSPACE,
HELP_PROJECT,
HELP_HOLD,
HELP_HOLDS,
HELP_RELEASE,
HELP_DIFF,
HELP_BOOKMARK,
HELP_CHANNEL_PROGRAM,
HELP_LOAD_KEY,
HELP_UNLOAD_KEY,
HELP_CHANGE_KEY,
HELP_VERSION,
HELP_REDACT,
HELP_JAIL,
HELP_UNJAIL,
HELP_WAIT,
HELP_ZONE,
HELP_UNZONE,
} zfs_help_t;
typedef struct zfs_command {
const char *name;
int (*func)(int argc, char **argv);
zfs_help_t usage;
} zfs_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zfs_command_t command_table[] = {
{ "version", zfs_do_version, HELP_VERSION },
{ NULL },
{ "create", zfs_do_create, HELP_CREATE },
{ "destroy", zfs_do_destroy, HELP_DESTROY },
{ NULL },
{ "snapshot", zfs_do_snapshot, HELP_SNAPSHOT },
{ "rollback", zfs_do_rollback, HELP_ROLLBACK },
{ "clone", zfs_do_clone, HELP_CLONE },
{ "promote", zfs_do_promote, HELP_PROMOTE },
{ "rename", zfs_do_rename, HELP_RENAME },
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ NULL },
{ "list", zfs_do_list, HELP_LIST },
{ NULL },
{ "set", zfs_do_set, HELP_SET },
{ "get", zfs_do_get, HELP_GET },
{ "inherit", zfs_do_inherit, HELP_INHERIT },
{ "upgrade", zfs_do_upgrade, HELP_UPGRADE },
{ NULL },
{ "userspace", zfs_do_userspace, HELP_USERSPACE },
{ "groupspace", zfs_do_userspace, HELP_GROUPSPACE },
{ "projectspace", zfs_do_userspace, HELP_PROJECTSPACE },
{ NULL },
{ "project", zfs_do_project, HELP_PROJECT },
{ NULL },
{ "mount", zfs_do_mount, HELP_MOUNT },
{ "unmount", zfs_do_unmount, HELP_UNMOUNT },
{ "share", zfs_do_share, HELP_SHARE },
{ "unshare", zfs_do_unshare, HELP_UNSHARE },
{ NULL },
{ "send", zfs_do_send, HELP_SEND },
{ "receive", zfs_do_receive, HELP_RECEIVE },
{ NULL },
{ "allow", zfs_do_allow, HELP_ALLOW },
{ NULL },
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
{ NULL },
{ "hold", zfs_do_hold, HELP_HOLD },
{ "holds", zfs_do_holds, HELP_HOLDS },
{ "release", zfs_do_release, HELP_RELEASE },
{ "diff", zfs_do_diff, HELP_DIFF },
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
{ "redact", zfs_do_redact, HELP_REDACT },
{ "wait", zfs_do_wait, HELP_WAIT },
#ifdef __FreeBSD__
{ "jail", zfs_do_jail, HELP_JAIL },
{ "unjail", zfs_do_unjail, HELP_UNJAIL },
#endif
#ifdef __linux__
{ "zone", zfs_do_zone, HELP_ZONE },
{ "unzone", zfs_do_unzone, HELP_UNZONE },
#endif
};
#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0]))
zfs_command_t *current_command;
static const char *
get_usage(zfs_help_t idx)
{
switch (idx) {
case HELP_CLONE:
return (gettext("\tclone [-p] [-o property=value] ... "
"<snapshot> <filesystem|volume>\n"));
case HELP_CREATE:
return (gettext("\tcreate [-Pnpuv] [-o property=value] ... "
"<filesystem>\n"
"\tcreate [-Pnpsv] [-b blocksize] [-o property=value] ... "
"-V <size> <volume>\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-fnpRrv] <filesystem|volume>\n"
"\tdestroy [-dnpRrv] "
"<filesystem|volume>@<snap>[%<snap>][,...]\n"
"\tdestroy <filesystem|volume>#<bookmark>\n"));
case HELP_GET:
return (gettext("\tget [-rHp] [-d max] "
"[-o \"all\" | field[,...]]\n"
"\t [-t type[,...]] [-s source[,...]]\n"
"\t <\"all\" | property[,...]> "
"[filesystem|volume|snapshot|bookmark] ...\n"));
case HELP_INHERIT:
return (gettext("\tinherit [-rS] <property> "
"<filesystem|volume|snapshot> ...\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade [-v]\n"
"\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
case HELP_LIST:
return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] "
"[-s property]...\n\t [-S property]... [-t type[,...]] "
"[filesystem|volume|snapshot] ...\n"));
case HELP_MOUNT:
return (gettext("\tmount\n"
"\tmount [-flvO] [-o opts] <-a | filesystem>\n"));
case HELP_PROMOTE:
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
return (gettext("\treceive [-vMnsFhu] "
"[-o <property>=<value>] ... [-x <property>] ...\n"
"\t <filesystem|volume|snapshot>\n"
"\treceive [-vMnsFhu] [-o <property>=<value>] ... "
"[-x <property>] ... \n"
"\t [-d | -e] <filesystem>\n"
"\treceive -A <filesystem|volume>\n"));
case HELP_RENAME:
return (gettext("\trename [-f] <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"
"\trename -p [-f] <filesystem|volume> <filesystem|volume>\n"
"\trename -u [-f] <filesystem> <filesystem>\n"
"\trename -r <snapshot> <snapshot>\n"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
return (gettext("\tsend [-DLPbcehnpsvw] "
"[-i|-I snapshot]\n"
"\t [-R [-X dataset[,dataset]...]] <snapshot>\n"
"\tsend [-DnvPLecw] [-i snapshot|bookmark] "
"<filesystem|volume|snapshot>\n"
"\tsend [-DnPpvLec] [-i bookmark|snapshot] "
"--redact <bookmark> <snapshot>\n"
"\tsend [-nvPe] -t <receive_resume_token>\n"
"\tsend [-Pnv] --saved filesystem\n"));
case HELP_SET:
return (gettext("\tset <property=value> ... "
"<filesystem|volume|snapshot> ...\n"));
case HELP_SHARE:
return (gettext("\tshare [-l] <-a [nfs|smb] | filesystem>\n"));
case HELP_SNAPSHOT:
return (gettext("\tsnapshot [-r] [-o property=value] ... "
"<filesystem|volume>@<snap> ...\n"));
case HELP_UNMOUNT:
return (gettext("\tunmount [-fu] "
"<-a | filesystem|mountpoint>\n"));
case HELP_UNSHARE:
return (gettext("\tunshare "
"<-a [nfs|smb] | filesystem|mountpoint>\n"));
case HELP_ALLOW:
return (gettext("\tallow <filesystem|volume>\n"
"\tallow [-ldug] "
"<\"everyone\"|user|group>[,...] <perm|@setname>[,...]\n"
"\t <filesystem|volume>\n"
"\tallow [-ld] -e <perm|@setname>[,...] "
"<filesystem|volume>\n"
"\tallow -c <perm|@setname>[,...] <filesystem|volume>\n"
"\tallow -s @setname <perm|@setname>[,...] "
"<filesystem|volume>\n"));
case HELP_UNALLOW:
return (gettext("\tunallow [-rldug] "
"<\"everyone\"|user|group>[,...]\n"
"\t [<perm|@setname>[,...]] <filesystem|volume>\n"
"\tunallow [-rld] -e [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -c [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -s @setname [<perm|@setname>[,...]] "
"<filesystem|volume>\n"));
case HELP_USERSPACE:
return (gettext("\tuserspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_GROUPSPACE:
return (gettext("\tgroupspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_PROJECTSPACE:
return (gettext("\tprojectspace [-Hp] [-o field[,...]] "
"[-s field] ... \n"
"\t [-S field] ... <filesystem|snapshot|path>\n"));
case HELP_PROJECT:
return (gettext("\tproject [-d|-r] <directory|file ...>\n"
"\tproject -c [-0] [-d|-r] [-p id] <directory|file ...>\n"
"\tproject -C [-k] [-r] <directory ...>\n"
"\tproject [-p id] [-r] [-s] <directory ...>\n"));
case HELP_HOLD:
return (gettext("\thold [-r] <tag> <snapshot> ...\n"));
case HELP_HOLDS:
return (gettext("\tholds [-rH] <snapshot> ...\n"));
case HELP_RELEASE:
return (gettext("\trelease [-r] <tag> <snapshot> ...\n"));
case HELP_DIFF:
return (gettext("\tdiff [-FHt] <snapshot> "
"[snapshot|filesystem]\n"));
case HELP_BOOKMARK:
return (gettext("\tbookmark <snapshot|bookmark> "
"<newbookmark>\n"));
case HELP_CHANNEL_PROGRAM:
return (gettext("\tprogram [-jn] [-t <instruction limit>] "
"[-m <memory limit (b)>]\n"
"\t <pool> <program file> [lua args...]\n"));
case HELP_LOAD_KEY:
return (gettext("\tload-key [-rn] [-L <keylocation>] "
"<-a | filesystem|volume>\n"));
case HELP_UNLOAD_KEY:
return (gettext("\tunload-key [-r] "
"<-a | filesystem|volume>\n"));
case HELP_CHANGE_KEY:
return (gettext("\tchange-key [-l] [-o keyformat=<value>]\n"
"\t [-o keylocation=<value>] [-o pbkdf2iters=<value>]\n"
"\t <filesystem|volume>\n"
"\tchange-key -i [-l] <filesystem|volume>\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_REDACT:
return (gettext("\tredact <snapshot> <bookmark> "
"<redaction_snapshot> ...\n"));
case HELP_JAIL:
return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
case HELP_UNJAIL:
return (gettext("\tunjail <jailid|jailname> <filesystem>\n"));
case HELP_WAIT:
return (gettext("\twait [-t <activity>] <filesystem>\n"));
case HELP_ZONE:
return (gettext("\tzone <nsfile> <filesystem>\n"));
case HELP_UNZONE:
return (gettext("\tunzone <nsfile> <filesystem>\n"));
default:
__builtin_unreachable();
}
}
void
nomem(void)
{
(void) fprintf(stderr, gettext("internal error: out of memory\n"));
exit(1);
}
/*
* Utility function to guarantee malloc() success.
*/
void *
safe_malloc(size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
nomem();
return (data);
}
static void *
safe_realloc(void *data, size_t size)
{
void *newp;
if ((newp = realloc(data, size)) == NULL) {
free(data);
nomem();
}
return (newp);
}
static char *
safe_strdup(const char *str)
{
char *dupstr = strdup(str);
if (dupstr == NULL)
nomem();
return (dupstr);
}
/*
* Callback routine that will print out information for each of
* the properties.
*/
static int
usage_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop));
if (zfs_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, "YES ");
if (zfs_prop_inheritable(prop))
(void) fprintf(fp, " YES ");
else
(void) fprintf(fp, " NO ");
(void) fprintf(fp, "%s\n", zfs_prop_values(prop) ?: "-");
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
int i;
boolean_t show_properties = B_FALSE;
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
(void) fprintf(fp, gettext("usage: zfs command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp, gettext("\nEach dataset is of the form: "
"pool/[dataset/]*dataset[@name]\n"));
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
(strcmp(current_command->name, "set") == 0 ||
strcmp(current_command->name, "get") == 0 ||
strcmp(current_command->name, "inherit") == 0 ||
strcmp(current_command->name, "list") == 0))
show_properties = B_TRUE;
if (show_properties) {
(void) fprintf(fp,
gettext("\nThe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-14s %s %s %s\n\n",
"PROPERTY", "EDIT", "INHERIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_DATASET);
(void) fprintf(fp, "\t%-15s ", "userused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "userobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "written@<snap>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "written#<bookmark>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, gettext("\nSizes are specified in bytes "
"with standard units such as K, M, G, etc.\n"));
(void) fprintf(fp, "%s", gettext("\nUser-defined properties "
"can be specified by using a name containing a colon "
"(:).\n"));
(void) fprintf(fp, gettext("\nThe {user|group|project}"
"[obj]{used|quota}@ properties must be appended with\n"
"a user|group|project specifier of one of these forms:\n"
" POSIX name (eg: \"matt\")\n"
" POSIX id (eg: \"126829\")\n"
" SMB name@domain (eg: \"matt@sun\")\n"
" SMB SID (eg: \"S-1-234-567-89\")\n"));
} else {
(void) fprintf(fp,
gettext("\nFor the property list, run: %s\n"),
"zfs set|get");
(void) fprintf(fp,
gettext("\nFor the delegated permission list, run: %s\n"),
"zfs allow|unallow");
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* Take a property=value argument string and add it to the given nvlist.
* Modifies the argument in place.
*/
static boolean_t
parseprop(nvlist_t *props, char *propname)
{
char *propval;
if ((propval = strchr(propname, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for property=value argument\n"));
return (B_FALSE);
}
*propval = '\0';
propval++;
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_string(props, propname, propval) != 0)
nomem();
return (B_TRUE);
}
/*
* Take a property name argument and add it to the given nvlist.
* Modifies the argument in place.
*/
static boolean_t
parsepropname(nvlist_t *props, char *propname)
{
if (strchr(propname, '=') != NULL) {
(void) fprintf(stderr, gettext("invalid character "
"'=' in property argument\n"));
return (B_FALSE);
}
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_boolean(props, propname) != 0)
nomem();
return (B_TRUE);
}
static int
parse_depth(char *opt, int *flags)
{
char *tmp;
int depth;
depth = (int)strtol(opt, &tmp, 0);
if (*tmp) {
(void) fprintf(stderr,
gettext("%s is not an integer\n"), optarg);
usage(B_FALSE);
}
if (depth < 0) {
(void) fprintf(stderr,
gettext("Depth can not be negative.\n"));
usage(B_FALSE);
}
*flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE);
return (depth);
}
#define PROGRESS_DELAY 2 /* seconds */
static const char *pt_reverse =
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";
static time_t pt_begin;
static char *pt_header = NULL;
static boolean_t pt_shown;
static void
start_progress_timer(void)
{
pt_begin = time(NULL) + PROGRESS_DELAY;
pt_shown = B_FALSE;
}
static void
set_progress_header(const char *header)
{
assert(pt_header == NULL);
pt_header = safe_strdup(header);
if (pt_shown) {
(void) printf("%s: ", header);
(void) fflush(stdout);
}
}
static void
update_progress(const char *update)
{
if (!pt_shown && time(NULL) > pt_begin) {
int len = strlen(update);
(void) printf("%s: %s%*.*s", pt_header, update, len, len,
pt_reverse);
(void) fflush(stdout);
pt_shown = B_TRUE;
} else if (pt_shown) {
int len = strlen(update);
(void) printf("%s%*.*s", update, len, len, pt_reverse);
(void) fflush(stdout);
}
}
static void
finish_progress(const char *done)
{
if (pt_shown) {
(void) puts(done);
(void) fflush(stdout);
}
free(pt_header);
pt_header = NULL;
}
static int
zfs_mount_and_share(libzfs_handle_t *hdl, const char *dataset, zfs_type_t type)
{
zfs_handle_t *zhp = NULL;
int ret = 0;
zhp = zfs_open(hdl, dataset, type);
if (zhp == NULL)
return (1);
/*
* Volumes may neither be mounted nor shared. Potentially in the
* future filesystems detected on these volumes could be mounted.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
return (0);
}
/*
* Mount and/or share the new filesystem as appropriate. We provide a
* verbose error message to let the user know that their filesystem was
* in fact created, even if we failed to mount or share it.
*
* If the user doesn't want the dataset automatically mounted, then
* skip the mount/share step
*/
if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type, B_FALSE) &&
zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_ON) {
if (zfs_mount_delegation_check()) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but it may only be "
"mounted by root\n"));
ret = 1;
} else if (zfs_mount(zhp, NULL, 0) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not mounted\n"));
ret = 1;
} else if (zfs_share(zhp, NULL) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not shared\n"));
ret = 1;
}
zfs_commit_shares(NULL);
}
zfs_close(zhp);
return (ret);
}
/*
* zfs clone [-p] [-o prop=value] ... <snap> <fs | vol>
*
* Given an existing dataset, create a writable copy whose initial contents
* are the same as the source. The newly created dataset maintains a
* dependency on the original; the original cannot be destroyed so long as
* the clone exists.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
static int
zfs_do_clone(int argc, char **argv)
{
zfs_handle_t *zhp = NULL;
boolean_t parents = B_FALSE;
nvlist_t *props;
int ret = 0;
int c;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "o:p")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
case 'p':
parents = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
goto usage;
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto usage;
}
/* open the source dataset */
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) {
nvlist_free(props);
return (1);
}
if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
/*
* Now create the ancestors of the target dataset. If the
* target already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
zfs_close(zhp);
nvlist_free(props);
return (0);
}
if (zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
nvlist_free(props);
return (1);
}
}
/* pass to libzfs */
ret = zfs_clone(zhp, argv[1], props);
/* create the mountpoint if necessary */
if (ret == 0) {
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
nvlist_free(props);
return (!!ret);
usage:
ASSERT3P(zhp, ==, NULL);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Return a default volblocksize for the pool which always uses more than
* half of the data sectors. This primarily applies to dRAID which always
* writes full stripe widths.
*/
static uint64_t
default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
{
uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
nvlist_t *tree, **vdevs;
uint_t nvdevs;
nvlist_t *config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (ZVOL_DEFAULT_BLOCKSIZE);
}
for (int i = 0; i < nvdevs; i++) {
nvlist_t *nv = vdevs[i];
uint64_t ashift, ndata, nparity;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA,
&ndata) == 0) {
/* dRAID minimum allocation width */
asize = MAX(asize, ndata * (1ULL << ashift));
} else if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
/* raidz minimum allocation width */
if (nparity == 1)
asize = MAX(asize, 2 * (1ULL << ashift));
else
asize = MAX(asize, 4 * (1ULL << ashift));
} else {
/* mirror or (non-redundant) leaf vdev */
asize = MAX(asize, 1ULL << ashift);
}
}
/*
* Calculate the target volblocksize such that more than half
* of the asize is used. The following table is for 4k sectors.
*
* n asize blksz used | n asize blksz used
* -------------------------+---------------------------------
* 1 4,096 8,192 100% | 9 36,864 32,768 88%
* 2 8,192 8,192 100% | 10 40,960 32,768 80%
* 3 12,288 8,192 66% | 11 45,056 32,768 72%
* 4 16,384 16,384 100% | 12 49,152 32,768 66%
* 5 20,480 16,384 80% | 13 53,248 32,768 61%
* 6 24,576 16,384 66% | 14 57,344 32,768 57%
* 7 28,672 16,384 57% | 15 61,440 32,768 53%
* 8 32,768 32,768 100% | 16 65,536 65,536 100%
*
* This is primarily a concern for dRAID which always allocates
* a full stripe width. For dRAID the default stripe width is
* n=8 in which case the volblocksize is set to 32k. Ignoring
* compression there are no unused sectors. This same reasoning
* applies to raidz[2,3] so target 4 sectors to minimize waste.
*/
uint64_t tgt_volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
while (tgt_volblocksize * 2 <= asize)
tgt_volblocksize *= 2;
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
if (nvlist_lookup_uint64(props, prop, &volblocksize) == 0) {
/* Issue a warning when a non-optimal size is requested. */
if (volblocksize < ZVOL_DEFAULT_BLOCKSIZE) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is less than the default "
"minimum block size (%llu).\nTo reduce wasted "
"space a volblocksize of %llu is recommended.\n"),
(u_longlong_t)volblocksize,
(u_longlong_t)ZVOL_DEFAULT_BLOCKSIZE,
(u_longlong_t)tgt_volblocksize);
} else if (volblocksize < tgt_volblocksize) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is much less than the "
"minimum allocation\nunit (%llu), which wastes "
"at least %llu%% of space. To reduce wasted "
"space,\nuse a larger volblocksize (%llu is "
"recommended), fewer dRAID data disks\n"
"per group, or smaller sector size (ashift).\n"),
(u_longlong_t)volblocksize, (u_longlong_t)asize,
(u_longlong_t)((100 * (asize - volblocksize)) /
asize), (u_longlong_t)tgt_volblocksize);
}
} else {
volblocksize = tgt_volblocksize;
fnvlist_add_uint64(props, prop, volblocksize);
}
return (volblocksize);
}
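/*
 * Editor's sketch (worked example of the comment/table above, not upstream
 * code): the default block size is doubled while twice the value still fits
 * within the minimum allocation size (asize), which guarantees more than
 * half of each allocation is used. For a hypothetical dRAID group of 8 data
 * disks with 4 KiB sectors (asize = 32768) and the 8 KiB starting size shown
 * in the table, this yields 32768, matching row n=8.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t asize = 8 * 4096;	/* ndata << ashift (hypothetical) */
	uint64_t tgt = 8192;		/* default starting size per the table */

	while (tgt * 2 <= asize)
		tgt *= 2;
	/* prints "asize=32768 target volblocksize=32768" */
	printf("asize=%llu target volblocksize=%llu\n",
	    (unsigned long long)asize, (unsigned long long)tgt);
	return (0);
}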
/*
* zfs create [-Pnpv] [-o prop=value] ... fs
* zfs create [-Pnpsv] [-b blocksize] [-o prop=value] ... -V vol size
*
* Create a new dataset. This command can be used to create filesystems
* and volumes. Snapshot creation is handled by 'zfs snapshot'.
* For volumes, the user must specify a size to be used.
*
* The '-s' flag applies only to volumes, and indicates that we should not try
* to set the reservation for this volume. By default we set a reservation
* equal to the size for any volume. For pools with SPA_VERSION >=
* SPA_VERSION_REFRESERVATION, we set a refreservation instead.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*
* The '-n' flag is no-op (dry run) mode. This will perform a user-space sanity
* check of arguments and properties, but does not check for permissions,
* available space, etc.
*
* The '-u' flag prevents the newly created file system from being mounted.
*
* The '-v' flag is for verbose output.
*
* The '-P' flag is used for parseable output. It implies '-v'.
*/
static int
zfs_do_create(int argc, char **argv)
{
zfs_type_t type = ZFS_TYPE_FILESYSTEM;
zpool_handle_t *zpool_handle = NULL;
nvlist_t *real_props = NULL;
uint64_t volsize = 0;
int c;
boolean_t noreserve = B_FALSE;
boolean_t bflag = B_FALSE;
boolean_t parents = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t nomount = B_FALSE;
boolean_t verbose = B_FALSE;
boolean_t parseable = B_FALSE;
int ret = 1;
nvlist_t *props;
uint64_t intval;
char *strval;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":PV:b:nso:puv")) != -1) {
switch (c) {
case 'V':
type = ZFS_TYPE_VOLUME;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0)
nomem();
volsize = intval;
break;
case 'P':
verbose = B_TRUE;
parseable = B_TRUE;
break;
case 'p':
parents = B_TRUE;
break;
case 'b':
bflag = B_TRUE;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"block size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
intval) != 0)
nomem();
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg))
goto error;
break;
case 's':
noreserve = B_TRUE;
break;
case 'u':
nomount = B_TRUE;
break;
case 'v':
verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing size "
"argument\n"));
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) {
(void) fprintf(stderr, gettext("'-s' and '-b' can only be "
"used when creating a volume\n"));
goto badusage;
}
if (nomount && type != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("'-u' can only be "
"used when creating a filesystem\n"));
goto badusage;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing %s argument\n"),
zfs_type_to_name(type));
goto badusage;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto badusage;
}
if (dryrun || type == ZFS_TYPE_VOLUME) {
char msg[ZFS_MAX_DATASET_NAME_LEN * 2];
char *p;
if ((p = strchr(argv[0], '/')) != NULL)
*p = '\0';
zpool_handle = zpool_open(g_zfs, argv[0]);
if (p != NULL)
*p = '/';
if (zpool_handle == NULL)
goto error;
(void) snprintf(msg, sizeof (msg),
dryrun ? gettext("cannot verify '%s'") :
gettext("cannot create '%s'"), argv[0]);
if (props && (real_props = zfs_valid_proplist(g_zfs, type,
props, 0, NULL, zpool_handle, B_TRUE, msg)) == NULL) {
zpool_close(zpool_handle);
goto error;
}
}
if (type == ZFS_TYPE_VOLUME) {
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
uint64_t volblocksize = default_volblocksize(zpool_handle,
real_props);
if (volblocksize != ZVOL_DEFAULT_BLOCKSIZE &&
nvlist_lookup_string(props, prop, &strval) != 0) {
if (asprintf(&strval, "%llu",
(u_longlong_t)volblocksize) == -1)
nomem();
nvlist_add_string(props, prop, strval);
free(strval);
}
/*
* If volsize is not a multiple of volblocksize, round it
* up to the nearest multiple of the volblocksize.
*/
if (volsize % volblocksize) {
volsize = P2ROUNDUP_TYPED(volsize, volblocksize,
uint64_t);
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (type == ZFS_TYPE_VOLUME && !noreserve) {
uint64_t spa_version;
zfs_prop_t resv_prop;
spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
if (spa_version >= SPA_VERSION_REFRESERVATION)
resv_prop = ZFS_PROP_REFRESERVATION;
else
resv_prop = ZFS_PROP_RESERVATION;
volsize = zvol_volsize_to_reservation(zpool_handle, volsize,
real_props);
if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop),
&strval) != 0) {
if (nvlist_add_uint64(props,
zfs_prop_to_name(resv_prop), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (zpool_handle != NULL) {
zpool_close(zpool_handle);
nvlist_free(real_props);
}
if (parents && zfs_name_valid(argv[0], type)) {
/*
* Now create the ancestors of the target dataset. If the target
* already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[0], type)) {
ret = 0;
goto error;
}
if (verbose) {
(void) printf(parseable ? "create_ancestors\t%s\n" :
dryrun ? "would create ancestors of %s\n" :
"create ancestors of %s\n", argv[0]);
}
if (!dryrun) {
if (zfs_create_ancestors(g_zfs, argv[0]) != 0) {
goto error;
}
}
}
if (verbose) {
nvpair_t *nvp = NULL;
(void) printf(parseable ? "create\t%s\n" :
dryrun ? "would create %s\n" : "create %s\n", argv[0]);
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
uint64_t uval;
char *sval;
switch (nvpair_type(nvp)) {
case DATA_TYPE_UINT64:
VERIFY0(nvpair_value_uint64(nvp, &uval));
(void) printf(parseable ?
"property\t%s\t%llu\n" : "\t%s=%llu\n",
nvpair_name(nvp), (u_longlong_t)uval);
break;
case DATA_TYPE_STRING:
VERIFY0(nvpair_value_string(nvp, &sval));
(void) printf(parseable ?
"property\t%s\t%s\n" : "\t%s=%s\n",
nvpair_name(nvp), sval);
break;
default:
(void) fprintf(stderr, "property '%s' "
"has illegal type %d\n",
nvpair_name(nvp), nvpair_type(nvp));
abort();
}
}
}
if (dryrun) {
ret = 0;
goto error;
}
/* pass to libzfs */
if (zfs_create(g_zfs, argv[0], type, props) != 0)
goto error;
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (nomount) {
ret = 0;
goto error;
}
ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
badusage:
nvlist_free(props);
usage(B_FALSE);
return (2);
}
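/*
 * Editor's sketch (arithmetic illustration, not upstream code): zfs_do_create()
 * above rounds a requested volsize up to the next multiple of volblocksize via
 * P2ROUNDUP_TYPED(). The equivalent power-of-two round-up for a hypothetical
 * 5 GB request with 32 KiB blocks:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t volsize = 5000000000ULL;	/* requested size in bytes */
	uint64_t volblocksize = 32768;		/* must be a power of two */
	uint64_t rounded = (volsize + volblocksize - 1) & ~(volblocksize - 1);

	/* prints "5000000000 -> 5000003584" (next multiple of 32768) */
	printf("%llu -> %llu\n", (unsigned long long)volsize,
	    (unsigned long long)rounded);
	return (0);
}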
/*
* zfs destroy [-rRf] <fs, vol>
* zfs destroy [-rRd] <snap>
*
* -r Recursively destroy all children
* -R Recursively destroy all dependents, including clones
* -f Force unmounting of any dependents
* -d If we can't destroy now, mark for deferred destruction
*
* Destroys the given dataset. By default, it will unmount any filesystems,
* and refuse to destroy a dataset that has any dependents. A dependent can
* either be a child, or a clone of a child.
*/
typedef struct destroy_cbdata {
boolean_t cb_first;
boolean_t cb_force;
boolean_t cb_recurse;
boolean_t cb_error;
boolean_t cb_doclones;
zfs_handle_t *cb_target;
boolean_t cb_defer_destroy;
boolean_t cb_verbose;
boolean_t cb_parsable;
boolean_t cb_dryrun;
nvlist_t *cb_nvl;
nvlist_t *cb_batchedsnaps;
/* first snap in contiguous run */
char *cb_firstsnap;
/* previous snap in contiguous run */
char *cb_prevsnap;
int64_t cb_snapused;
char *cb_snapspec;
char *cb_bookmark;
uint64_t cb_snap_count;
} destroy_cbdata_t;
/*
* Check for any dependents based on the '-r' or '-R' flags.
*/
static int
destroy_check_dependent(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cbp = data;
const char *tname = zfs_get_name(cbp->cb_target);
const char *name = zfs_get_name(zhp);
if (strncmp(tname, name, strlen(tname)) == 0 &&
(name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) {
/*
* This is a direct descendant, not a clone somewhere else in
* the hierarchy.
*/
if (cbp->cb_recurse)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has children\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-r' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
} else {
/*
* This is a clone. We only want to report this if the '-r'
* wasn't specified, or the target is a snapshot.
*/
if (!cbp->cb_recurse &&
zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has dependent clones\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-R' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
cbp->cb_dryrun = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
}
out:
zfs_close(zhp);
return (0);
}
static int
destroy_batched(destroy_cbdata_t *cb)
{
int error = zfs_destroy_snaps_nvl(g_zfs,
cb->cb_batchedsnaps, B_FALSE);
fnvlist_free(cb->cb_batchedsnaps);
cb->cb_batchedsnaps = fnvlist_alloc();
return (error);
}
static int
destroy_callback(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cb = data;
const char *name = zfs_get_name(zhp);
int error;
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
/*
* Ignore pools (which we've already flagged as an error before getting
* here).
*/
if (strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
zfs_close(zhp);
return (0);
}
if (cb->cb_dryrun) {
zfs_close(zhp);
return (0);
}
/*
* We batch up all contiguous snapshots (even of different
* filesystems) and destroy them with one ioctl. We can't
* simply do all snap deletions and then all fs deletions,
* because we must delete a clone before its origin.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) {
cb->cb_snap_count++;
fnvlist_add_boolean(cb->cb_batchedsnaps, name);
if (cb->cb_snap_count % 10 == 0 && cb->cb_defer_destroy)
error = destroy_batched(cb);
} else {
error = destroy_batched(cb);
if (error != 0 ||
zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 ||
zfs_destroy(zhp, cb->cb_defer_destroy) != 0) {
zfs_close(zhp);
/*
* When performing a recursive destroy we ignore errors
* so that the recursive destroy can continue
* destroying past problem datasets.
*/
if (cb->cb_recurse) {
cb->cb_error = B_TRUE;
return (0);
}
return (-1);
}
}
zfs_close(zhp);
return (0);
}
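/*
 * Editor's sketch (pattern illustration, not upstream code): destroy_callback()
 * above accumulates snapshot names in an nvlist and flushes the batch
 * periodically, and always before destroying a filesystem, because a clone
 * must be destroyed before its origin. A stand-alone skeleton of that batching
 * follows; flush_batch() stands in for zfs_destroy_snaps_nvl(), and the
 * pool/snapshot names are hypothetical.
 */
#include <libnvpair.h>
#include <stdio.h>

static void
flush_batch(nvlist_t **batch)
{
	nvpair_t *p = NULL;

	while ((p = nvlist_next_nvpair(*batch, p)) != NULL)
		printf("destroying %s\n", nvpair_name(p));
	fnvlist_free(*batch);
	*batch = fnvlist_alloc();	/* start a fresh batch */
}

int
main(void)
{
	nvlist_t *batch = fnvlist_alloc();
	const char *snaps[] = { "pool/fs@a", "pool/fs@b", "pool/vol@a" };

	for (int i = 0; i < 3; i++) {
		fnvlist_add_boolean(batch, snaps[i]);
		if ((i + 1) % 2 == 0)	/* flush every couple of snapshots */
			flush_batch(&batch);
	}
	flush_batch(&batch);		/* final flush */
	fnvlist_free(batch);
	return (0);
}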
static int
destroy_print_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
const char *name = zfs_get_name(zhp);
int err = 0;
if (nvlist_exists(cb->cb_nvl, name)) {
if (cb->cb_firstsnap == NULL)
cb->cb_firstsnap = strdup(name);
if (cb->cb_prevsnap != NULL)
free(cb->cb_prevsnap);
/* this snap continues the current range */
cb->cb_prevsnap = strdup(name);
if (cb->cb_firstsnap == NULL || cb->cb_prevsnap == NULL)
nomem();
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
} else if (cb->cb_firstsnap != NULL) {
/* end of this range */
uint64_t used = 0;
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
zfs_close(zhp);
return (err);
}
static int
destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
{
int err;
assert(cb->cb_firstsnap == NULL);
assert(cb->cb_prevsnap == NULL);
err = zfs_iter_snapshots_sorted(fs_zhp, destroy_print_cb, cb, 0, 0);
if (cb->cb_firstsnap != NULL) {
uint64_t used = 0;
if (err == 0) {
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
}
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
return (err);
}
static int
snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
/* Check for clones. */
if (!cb->cb_doclones && !cb->cb_defer_destroy) {
cb->cb_target = zhp;
cb->cb_first = B_TRUE;
err = zfs_iter_dependents(zhp, B_TRUE,
destroy_check_dependent, cb);
}
if (err == 0) {
if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp)))
nomem();
}
zfs_close(zhp);
return (err);
}
static int
gather_snapshots(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
err = zfs_iter_snapspec(zhp, cb->cb_snapspec, snapshot_to_nvl_cb, cb);
if (err == ENOENT)
err = 0;
if (err != 0)
goto out;
if (cb->cb_verbose) {
err = destroy_print_snapshots(zhp, cb);
if (err != 0)
goto out;
}
if (cb->cb_recurse)
err = zfs_iter_filesystems(zhp, gather_snapshots, cb);
out:
zfs_close(zhp);
return (err);
}
static int
destroy_clones(destroy_cbdata_t *cb)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL);
pair != NULL;
pair = nvlist_next_nvpair(cb->cb_nvl, pair)) {
zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair),
ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
boolean_t defer = cb->cb_defer_destroy;
int err;
/*
* We can't defer-destroy non-snapshots, so disable
* deferral while destroying the clones.
*/
cb->cb_defer_destroy = B_FALSE;
err = zfs_iter_dependents(zhp, B_FALSE,
destroy_callback, cb);
cb->cb_defer_destroy = defer;
zfs_close(zhp);
if (err != 0)
return (err);
}
}
return (0);
}
static int
zfs_do_destroy(int argc, char **argv)
{
destroy_cbdata_t cb = { 0 };
int rv = 0;
int err = 0;
int c;
zfs_handle_t *zhp = NULL;
char *at, *pound;
zfs_type_t type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, "vpndfrR")) != -1) {
switch (c) {
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'p':
cb.cb_verbose = B_TRUE;
cb.cb_parsable = B_TRUE;
break;
case 'n':
cb.cb_dryrun = B_TRUE;
break;
case 'd':
cb.cb_defer_destroy = B_TRUE;
type = ZFS_TYPE_SNAPSHOT;
break;
case 'f':
cb.cb_force = B_TRUE;
break;
case 'r':
cb.cb_recurse = B_TRUE;
break;
case 'R':
cb.cb_recurse = B_TRUE;
cb.cb_doclones = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
at = strchr(argv[0], '@');
pound = strchr(argv[0], '#');
if (at != NULL) {
/* Build the list of snaps to destroy in cb_nvl. */
cb.cb_nvl = fnvlist_alloc();
*at = '\0';
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(cb.cb_nvl);
return (1);
}
cb.cb_snapspec = at + 1;
if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 ||
cb.cb_error) {
rv = 1;
goto out;
}
if (nvlist_empty(cb.cb_nvl)) {
(void) fprintf(stderr, gettext("could not find any "
"snapshots to destroy; check snapshot names.\n"));
rv = 1;
goto out;
}
if (cb.cb_verbose) {
char buf[16];
zfs_nicebytes(cb.cb_snapused, buf, sizeof (buf));
if (cb.cb_parsable) {
(void) printf("reclaim\t%llu\n",
(u_longlong_t)cb.cb_snapused);
} else if (cb.cb_dryrun) {
(void) printf(gettext("would reclaim %s\n"),
buf);
} else {
(void) printf(gettext("will reclaim %s\n"),
buf);
}
}
if (!cb.cb_dryrun) {
if (cb.cb_doclones) {
cb.cb_batchedsnaps = fnvlist_alloc();
err = destroy_clones(&cb);
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, B_FALSE);
}
if (err != 0) {
rv = 1;
goto out;
}
}
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs, cb.cb_nvl,
cb.cb_defer_destroy);
}
}
if (err != 0)
rv = 1;
} else if (pound != NULL) {
int err;
nvlist_t *nvl;
if (cb.cb_dryrun) {
(void) fprintf(stderr,
"dryrun is not supported with bookmark\n");
return (-1);
}
if (cb.cb_defer_destroy) {
(void) fprintf(stderr,
"defer destroy is not supported with bookmark\n");
return (-1);
}
if (cb.cb_recurse) {
(void) fprintf(stderr,
"recursive is not supported with bookmark\n");
return (-1);
}
/*
* Unfortunately, zfs_bookmark() doesn't honor the
* casesensitivity setting. However, we can't simply
* remove this check, because lzc_destroy_bookmarks()
* ignores non-existent bookmarks, so this is necessary
* to get a proper error message.
*/
if (!zfs_bookmark_exists(argv[0])) {
(void) fprintf(stderr, gettext("bookmark '%s' "
"does not exist.\n"), argv[0]);
return (1);
}
nvl = fnvlist_alloc();
fnvlist_add_boolean(nvl, argv[0]);
err = lzc_destroy_bookmarks(nvl, NULL);
if (err != 0) {
(void) zfs_standard_error(g_zfs, err,
"cannot destroy bookmark");
}
nvlist_free(nvl);
return (err);
} else {
/* Open the given dataset */
if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL)
return (1);
cb.cb_target = zhp;
/*
* Perform an explicit check for pools before going any further.
*/
if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"operation does not apply to pools\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zfs destroy -r "
"%s' to destroy all datasets in the pool\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zpool destroy %s' "
"to destroy the pool itself\n"), zfs_get_name(zhp));
rv = 1;
goto out;
}
/*
* Check for any dependents and/or clones.
*/
cb.cb_first = B_TRUE;
if (!cb.cb_doclones &&
zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent,
&cb) != 0) {
rv = 1;
goto out;
}
if (cb.cb_error) {
rv = 1;
goto out;
}
cb.cb_batchedsnaps = fnvlist_alloc();
if (zfs_iter_dependents(zhp, B_FALSE, destroy_callback,
&cb) != 0) {
rv = 1;
goto out;
}
/*
* Do the real thing. The callback will close the
* handle regardless of whether it succeeds or not.
*/
err = destroy_callback(zhp, &cb);
zhp = NULL;
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, cb.cb_defer_destroy);
}
if (err != 0 || cb.cb_error == B_TRUE)
rv = 1;
}
out:
fnvlist_free(cb.cb_batchedsnaps);
fnvlist_free(cb.cb_nvl);
if (zhp != NULL)
zfs_close(zhp);
return (rv);
}
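/*
 * Illustrative sketch (editor's addition): the argument classification done
 * at the top of zfs_do_destroy(), shown as a standalone program.  An '@'
 * selects the snapshot-spec path, a '#' selects the bookmark path, and
 * anything else is treated as a plain dataset.  The names are hypothetical.
 */
#include <stdio.h>
#include <string.h>
static const char *
classify_destroy_arg(const char *arg)
{
	if (strchr(arg, '@') != NULL)
		return ("snapshot spec");
	if (strchr(arg, '#') != NULL)
		return ("bookmark");
	return ("dataset");
}
int
main(void)
{
	(void) printf("%s\n", classify_destroy_arg("tank/data@a%d"));
	(void) printf("%s\n", classify_destroy_arg("tank/data#mark"));
	(void) printf("%s\n", classify_destroy_arg("tank/data"));
	return (0);
}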
static boolean_t
is_recvd_column(zprop_get_cbdata_t *cbp)
{
int i;
zfs_get_column_t col;
for (i = 0; i < ZFS_GET_NCOLS &&
(col = cbp->cb_columns[i]) != GET_COL_NONE; i++)
if (col == GET_COL_RECVD)
return (B_TRUE);
return (B_FALSE);
}
/*
* zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...]
* < all | property[,property]... > < fs | snap | vol > ...
*
* -r recurse over any child datasets
* -H scripted mode. Headers are stripped, and fields are separated
* by tabs instead of spaces.
* -o Set of fields to display. One of "name,property,value,
* received,source". Default is "name,property,value,source".
* "all" is an alias for all five.
* -s Set of sources to allow. One of
* "local,default,inherited,received,temporary,none". Default is
* all six.
* -p Display values in parsable (literal) format.
*
* Prints properties for the given datasets. The user can control which
* columns to display as well as which property types to allow.
*/
/*
* Invoked to display the properties for a single dataset.
*/
static int
get_callback(zfs_handle_t *zhp, void *data)
{
char buf[ZFS_MAXPROPLEN];
char rbuf[ZFS_MAXPROPLEN];
zprop_source_t sourcetype;
char source[ZFS_MAX_DATASET_NAME_LEN];
zprop_get_cbdata_t *cbp = data;
nvlist_t *user_props = zfs_get_user_props(zhp);
zprop_list_t *pl = cbp->cb_proplist;
nvlist_t *propval;
const char *strval;
const char *sourceval;
boolean_t received = is_recvd_column(cbp);
for (; pl != NULL; pl = pl->pl_next) {
char *recvdval = NULL;
/*
* Skip the special fake placeholder. This will also skip over
* the name property when 'all' is specified.
*/
if (pl->pl_prop == ZFS_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop != ZPROP_USERPROP) {
if (zfs_prop_get(zhp, pl->pl_prop, buf,
sizeof (buf), &sourcetype, source,
sizeof (source),
cbp->cb_literal) != 0) {
if (pl->pl_all)
continue;
if (!zfs_prop_valid_for_type(pl->pl_prop,
ZFS_TYPE_DATASET, B_FALSE)) {
(void) fprintf(stderr,
gettext("No such property '%s'\n"),
zfs_prop_to_name(pl->pl_prop));
continue;
}
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
if (received && (zfs_prop_get_recvd(zhp,
zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
zfs_prop_to_name(pl->pl_prop),
buf, sourcetype, source, recvdval);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else {
if (nvlist_lookup_nvlist(user_props,
pl->pl_user_prop, &propval) != 0) {
if (pl->pl_all)
continue;
sourcetype = ZPROP_SRC_NONE;
strval = "-";
} else {
strval = fnvlist_lookup_string(propval,
ZPROP_VALUE);
sourceval = fnvlist_lookup_string(propval,
ZPROP_SOURCE);
if (strcmp(sourceval,
zfs_get_name(zhp)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
} else if (strcmp(sourceval,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
} else {
sourcetype = ZPROP_SRC_INHERITED;
(void) strlcpy(source,
sourceval, sizeof (source));
}
}
if (received && (zfs_prop_get_recvd(zhp,
pl->pl_user_prop, rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, strval, sourcetype,
source, recvdval);
}
}
return (0);
}
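/*
 * Illustrative sketch (editor's addition): the core libzfs call that
 * get_callback() is built around, isolated for clarity.  zfs_prop_get()
 * renders the value into 'buf' and reports where it came from (local,
 * inherited, default, ...).  Assumes an already-open handle, as in the
 * callers above.
 */
static void
print_compression(zfs_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];
	char source[ZFS_MAX_DATASET_NAME_LEN];
	zprop_source_t srctype;
	if (zfs_prop_get(zhp, ZFS_PROP_COMPRESSION, buf, sizeof (buf),
	    &srctype, source, sizeof (source), B_FALSE) == 0)
		(void) printf("%s\tcompression\t%s\n", zfs_get_name(zhp), buf);
}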
static int
zfs_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
int types = ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK;
char *fields;
int ret = 0;
int limit = 0;
zprop_list_t fake_name = { 0 };
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'o':
/*
* Process the set of columns to display. We zero out
* the structure to give us a blank slate.
*/
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_subopts[] =
{ "name", "property", "value",
"received", "source", "all" };
static const zfs_get_column_t col_subopt_col[] =
{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
GET_COL_RECVD, GET_COL_SOURCE };
static const int col_subopt_flags[] =
{ 0, 0, 0, ZFS_ITER_RECVD_PROPS, 0 };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
for (c = 0; c < ARRAY_SIZE(col_subopts); ++c)
if (strcmp(tok, col_subopts[c]) == 0)
goto found;
(void) fprintf(stderr,
gettext("invalid column name '%s'\n"), tok);
usage(B_FALSE);
found:
if (c >= 5) {
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
memcpy(cb.cb_columns, col_subopt_col,
sizeof (col_subopt_col));
flags |= ZFS_ITER_RECVD_PROPS;
i = ZFS_GET_NCOLS;
} else {
cb.cb_columns[i++] = col_subopt_col[c];
flags |= col_subopt_flags[c];
}
}
break;
case 's':
cb.cb_sources = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const source_opt[] = {
"local", "default",
"inherited", "received",
"temporary", "none" };
static const int source_flg[] = {
ZPROP_SRC_LOCAL, ZPROP_SRC_DEFAULT,
ZPROP_SRC_INHERITED, ZPROP_SRC_RECEIVED,
ZPROP_SRC_TEMPORARY, ZPROP_SRC_NONE };
for (i = 0; i < ARRAY_SIZE(source_opt); ++i)
if (strcmp(tok, source_opt[i]) == 0) {
cb.cb_sources |= source_flg[i];
goto found2;
}
(void) fprintf(stderr,
gettext("invalid source '%s'\n"), tok);
usage(B_FALSE);
found2:;
}
break;
case 't':
types = 0;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const type_opts[] = {
"filesystem", "volume",
"snapshot", "snap",
"bookmark",
"all" };
static const int type_types[] = {
ZFS_TYPE_FILESYSTEM, ZFS_TYPE_VOLUME,
ZFS_TYPE_SNAPSHOT, ZFS_TYPE_SNAPSHOT,
ZFS_TYPE_BOOKMARK,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK };
for (i = 0; i < ARRAY_SIZE(type_opts); ++i)
if (strcmp(tok, type_opts[i]) == 0) {
types |= type_types[i];
goto found3;
}
(void) fprintf(stderr,
gettext("invalid type '%s'\n"), tok);
usage(B_FALSE);
found3:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
fields = argv[0];
/*
* Handle users who want to get all snapshots or bookmarks
* of a dataset (ex. 'zfs get -t snapshot refer <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 1 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
argc--;
argv++;
/*
* As part of zfs_expand_proplist(), we keep track of the maximum column
* width for each property. For the 'NAME' (and 'SOURCE') columns, we
* need to know the maximum name length. However, the user likely did
* not specify 'name' as one of the properties to fetch, so we need to
* make sure we always include at least this property for
* print_get_headers() to work properly.
*/
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZFS_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
cb.cb_first = B_TRUE;
/* run for each object */
ret = zfs_for_each(argc, argv, flags, types, NULL,
&cb.cb_proplist, limit, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
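/*
 * Illustrative sketch (editor's addition): the strsep()-based comma-list
 * parsing used by the -o/-s/-t handlers above, as a standalone program.
 * strsep() consumes the string in place, returning one token per call and
 * NULL once the list is exhausted.
 */
#include <stdio.h>
#include <string.h>
int
main(void)
{
	char list[] = "name,used,available,mountpoint";
	char *cur = list;
	for (char *tok; (tok = strsep(&cur, ",")) != NULL; )
		(void) printf("column: %s\n", tok);
	return (0);
}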
/*
* inherit [-rS] <property> <fs|vol> ...
*
* -r Recurse over all children
* -S Revert to received value, if any
*
* For each dataset specified on the command line, inherit the given property
* from its parent. Inheriting a property at the pool level will cause it to
* use the default value. The '-r' flag will recurse over all children, and is
* useful for setting a property on a hierarchy-wide basis, regardless of any
* local modifications for each dataset.
*/
typedef struct inherit_cbdata {
const char *cb_propname;
boolean_t cb_received;
} inherit_cbdata_t;
static int
inherit_recurse_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname);
/*
* If we're doing it recursively, then ignore properties that
* are not valid for this type of dataset.
*/
if (prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, zfs_get_type(zhp), B_FALSE))
return (0);
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
inherit_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
zfs_do_inherit(int argc, char **argv)
{
int c;
zfs_prop_t prop;
inherit_cbdata_t cb = { 0 };
char *propname;
int ret = 0;
int flags = 0;
boolean_t received = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "rS")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'S':
received = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
propname = argv[0];
argc--;
argv++;
if ((prop = zfs_name_to_prop(propname)) != ZPROP_USERPROP) {
if (zfs_prop_readonly(prop)) {
(void) fprintf(stderr, gettext(
"%s property is read-only\n"),
propname);
return (1);
}
if (!zfs_prop_inheritable(prop) && !received) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be inherited\n"), propname);
if (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
(void) fprintf(stderr, gettext("use 'zfs set "
"%s=none' to clear\n"), propname);
(void) fprintf(stderr, gettext("use 'zfs "
"inherit -S %s' to revert to received "
"value\n"), propname);
}
return (1);
}
if (received && (prop == ZFS_PROP_VOLSIZE ||
prop == ZFS_PROP_VERSION)) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be reverted to a received value\n"), propname);
return (1);
}
} else if (!zfs_prop_user(propname)) {
(void) fprintf(stderr, gettext("invalid property '%s'\n"),
propname);
usage(B_FALSE);
}
cb.cb_propname = propname;
cb.cb_received = received;
if (flags & ZFS_ITER_RECURSE) {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_recurse_cb, &cb);
} else {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_cb, &cb);
}
return (ret);
}
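/*
 * Illustrative sketch (editor's addition): the single libzfs call both
 * callbacks above reduce to.  Clearing a locally-set value makes the dataset
 * fall back to its parent's (or the default) value; passing B_TRUE instead
 * reverts to the received value, as 'zfs inherit -S' does.  Assumes an
 * already-open handle; the property name is just an example.
 */
static int
clear_local_compression(zfs_handle_t *zhp)
{
	return (zfs_prop_inherit(zhp, "compression", B_FALSE) != 0);
}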
typedef struct upgrade_cbdata {
uint64_t cb_numupgraded;
uint64_t cb_numsamegraded;
uint64_t cb_numfailed;
uint64_t cb_version;
boolean_t cb_newer;
boolean_t cb_foundone;
char cb_lastfs[ZFS_MAX_DATASET_NAME_LEN];
} upgrade_cbdata_t;
static int
same_pool(zfs_handle_t *zhp, const char *name)
{
int len1 = strcspn(name, "/@");
const char *zhname = zfs_get_name(zhp);
int len2 = strcspn(zhname, "/@");
if (len1 != len2)
return (B_FALSE);
return (strncmp(name, zhname, len1) == 0);
}
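/*
 * Illustrative sketch (editor's addition): what same_pool() computes, as a
 * standalone program.  strcspn() measures the leading span containing
 * neither '/' nor '@', i.e. the pool component of a dataset or snapshot
 * name.  The names are hypothetical.
 */
#include <stdio.h>
#include <string.h>
int
main(void)
{
	const char *a = "tank/home@snap";
	const char *b = "tank/var/log";
	size_t la = strcspn(a, "/@");
	size_t lb = strcspn(b, "/@");
	if (la == lb && strncmp(a, b, la) == 0)
		(void) printf("same pool: %.*s\n", (int)la, a);
	return (0);
}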
static int
upgrade_list_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
/* list if it's old/new */
if ((!cb->cb_newer && version < ZPL_VERSION) ||
(cb->cb_newer && version > ZPL_VERSION)) {
char *str;
if (cb->cb_newer) {
str = gettext("The following filesystems are "
"formatted using a newer software version and\n"
"cannot be accessed on the current system.\n\n");
} else {
str = gettext("The following filesystems are "
"out of date, and can be upgraded. After being\n"
"upgraded, these filesystems (and any 'zfs send' "
"streams generated from\n"
"subsequent snapshots) will no longer be "
"accessible by older software versions.\n\n");
}
if (!cb->cb_foundone) {
(void) puts(str);
(void) printf(gettext("VER FILESYSTEM\n"));
(void) printf(gettext("--- ------------\n"));
cb->cb_foundone = B_TRUE;
}
(void) printf("%2u %s\n", version, zfs_get_name(zhp));
}
return (0);
}
static int
upgrade_set_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int needed_spa_version;
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
needed_spa_version = zfs_spa_version_map(cb->cb_version);
if (needed_spa_version < 0)
return (-1);
if (spa_version < needed_spa_version) {
/* can't upgrade */
(void) printf(gettext("%s: can not be "
"upgraded; the pool version needs to first "
"be upgraded\nto version %d\n\n"),
zfs_get_name(zhp), needed_spa_version);
cb->cb_numfailed++;
return (0);
}
/* upgrade */
if (version < cb->cb_version) {
char verstr[24];
(void) snprintf(verstr, sizeof (verstr),
"%llu", (u_longlong_t)cb->cb_version);
if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) {
/*
* If they did "zfs upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (zfs_prop_set(zhp, "version", verstr) == 0)
cb->cb_numupgraded++;
else
cb->cb_numfailed++;
(void) strcpy(cb->cb_lastfs, zfs_get_name(zhp));
} else if (version > cb->cb_version) {
/* can't downgrade */
(void) printf(gettext("%s: can not be downgraded; "
"it is already at version %u\n"),
zfs_get_name(zhp), version);
cb->cb_numfailed++;
} else {
cb->cb_numsamegraded++;
}
return (0);
}
/*
* zfs upgrade
* zfs upgrade -v
* zfs upgrade [-r] [-V <version>] <-a | filesystem>
*/
static int
zfs_do_upgrade(int argc, char **argv)
{
boolean_t all = B_FALSE;
boolean_t showversions = B_FALSE;
int ret = 0;
upgrade_cbdata_t cb = { 0 };
int c;
int flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "rvV:a")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
if (zfs_prop_string_to_index(ZFS_PROP_VERSION,
optarg, &cb.cb_version) != 0) {
(void) fprintf(stderr,
gettext("invalid version %s\n"), optarg);
usage(B_FALSE);
}
break;
case 'a':
all = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version))
usage(B_FALSE);
if (showversions && (flags & ZFS_ITER_RECURSE || all ||
cb.cb_version || argc))
usage(B_FALSE);
if ((all || argc) && (showversions))
usage(B_FALSE);
if (all && argc)
usage(B_FALSE);
if (showversions) {
/* Show info on available versions. */
(void) printf(gettext("The following filesystem versions are "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS filesystem version\n"));
(void) printf(gettext(" 2 Enhanced directory entries\n"));
(void) printf(gettext(" 3 Case insensitive and filesystem "
"user identifier (FUID)\n"));
(void) printf(gettext(" 4 userquota, groupquota "
"properties\n"));
(void) printf(gettext(" 5 System attributes\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf("see the ZFS Administration Guide.\n\n");
ret = 0;
} else if (argc || all) {
/* Upgrade filesystems */
if (cb.cb_version == 0)
cb.cb_version = ZPL_VERSION;
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_set_callback, &cb);
(void) printf(gettext("%llu filesystems upgraded\n"),
(u_longlong_t)cb.cb_numupgraded);
if (cb.cb_numsamegraded) {
(void) printf(gettext("%llu filesystems already at "
"this version\n"),
(u_longlong_t)cb.cb_numsamegraded);
}
if (cb.cb_numfailed != 0)
ret = 1;
} else {
/* List old-version filesystems */
boolean_t found;
(void) printf(gettext("This system is currently running "
"ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
flags |= ZFS_ITER_RECURSE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
found = cb.cb_foundone;
cb.cb_foundone = B_FALSE;
cb.cb_newer = B_TRUE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
if (!cb.cb_foundone && !found) {
(void) printf(gettext("All filesystems are "
"formatted with the current version.\n"));
}
}
return (ret);
}
/*
* zfs userspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs groupspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs projectspace [-Hp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] filesystem | snapshot | path
*
* -H Scripted mode; elide headers and separate columns by tabs.
* -i Translate SID to POSIX ID.
* -n Print numeric ID instead of user/group name.
* -o Control which fields to display.
* -p Use exact (parsable) numeric output.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* Displays space consumed by, and quotas on, each user in the specified
* filesystem or snapshot.
*/
/* us_field_types, us_field_hdr and us_field_names should be kept in sync */
enum us_field_types {
USFIELD_TYPE,
USFIELD_NAME,
USFIELD_USED,
USFIELD_QUOTA,
USFIELD_OBJUSED,
USFIELD_OBJQUOTA
};
static const char *const us_field_hdr[] = { "TYPE", "NAME", "USED", "QUOTA",
"OBJUSED", "OBJQUOTA" };
static const char *const us_field_names[] = { "type", "name", "used", "quota",
"objused", "objquota" };
#define USFIELD_LAST (sizeof (us_field_names) / sizeof (char *))
#define USTYPE_PSX_GRP (1 << 0)
#define USTYPE_PSX_USR (1 << 1)
#define USTYPE_SMB_GRP (1 << 2)
#define USTYPE_SMB_USR (1 << 3)
#define USTYPE_PROJ (1 << 4)
#define USTYPE_ALL \
(USTYPE_PSX_GRP | USTYPE_PSX_USR | USTYPE_SMB_GRP | USTYPE_SMB_USR | \
USTYPE_PROJ)
static int us_type_bits[] = {
USTYPE_PSX_GRP,
USTYPE_PSX_USR,
USTYPE_SMB_GRP,
USTYPE_SMB_USR,
USTYPE_ALL
};
static const char *const us_type_names[] = { "posixgroup", "posixuser",
"smbgroup", "smbuser", "all" };
typedef struct us_node {
nvlist_t *usn_nvl;
uu_avl_node_t usn_avlnode;
uu_list_node_t usn_listnode;
} us_node_t;
typedef struct us_cbdata {
nvlist_t **cb_nvlp;
uu_avl_pool_t *cb_avl_pool;
uu_avl_t *cb_avl;
boolean_t cb_numname;
boolean_t cb_nicenum;
boolean_t cb_sid2posix;
zfs_userquota_prop_t cb_prop;
zfs_sort_column_t *cb_sortcol;
size_t cb_width[USFIELD_LAST];
} us_cbdata_t;
static boolean_t us_populated = B_FALSE;
typedef struct {
zfs_sort_column_t *si_sortcol;
boolean_t si_numname;
} us_sort_info_t;
static int
us_field_index(const char *field)
{
for (int i = 0; i < USFIELD_LAST; i++) {
if (strcmp(field, us_field_names[i]) == 0)
return (i);
}
return (-1);
}
static int
us_compare(const void *larg, const void *rarg, void *unused)
{
const us_node_t *l = larg;
const us_node_t *r = rarg;
us_sort_info_t *si = (us_sort_info_t *)unused;
zfs_sort_column_t *sortcol = si->si_sortcol;
boolean_t numname = si->si_numname;
nvlist_t *lnvl = l->usn_nvl;
nvlist_t *rnvl = r->usn_nvl;
int rc = 0;
boolean_t lvb, rvb;
for (; sortcol != NULL; sortcol = sortcol->sc_next) {
char *lvstr = (char *)"";
char *rvstr = (char *)"";
uint32_t lv32 = 0;
uint32_t rv32 = 0;
uint64_t lv64 = 0;
uint64_t rv64 = 0;
zfs_prop_t prop = sortcol->sc_prop;
const char *propname = NULL;
boolean_t reverse = sortcol->sc_reverse;
switch (prop) {
case ZFS_PROP_TYPE:
propname = "type";
(void) nvlist_lookup_uint32(lnvl, propname, &lv32);
(void) nvlist_lookup_uint32(rnvl, propname, &rv32);
if (rv32 != lv32)
rc = (rv32 < lv32) ? 1 : -1;
break;
case ZFS_PROP_NAME:
propname = "name";
if (numname) {
compare_nums:
(void) nvlist_lookup_uint64(lnvl, propname,
&lv64);
(void) nvlist_lookup_uint64(rnvl, propname,
&rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
} else {
if ((nvlist_lookup_string(lnvl, propname,
&lvstr) == ENOENT) ||
(nvlist_lookup_string(rnvl, propname,
&rvstr) == ENOENT)) {
goto compare_nums;
}
rc = strcmp(lvstr, rvstr);
}
break;
case ZFS_PROP_USED:
case ZFS_PROP_QUOTA:
if (!us_populated)
break;
if (prop == ZFS_PROP_USED)
propname = "used";
else
propname = "quota";
(void) nvlist_lookup_uint64(lnvl, propname, &lv64);
(void) nvlist_lookup_uint64(rnvl, propname, &rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
break;
default:
break;
}
if (rc != 0) {
if (rc < 0)
return (reverse ? 1 : -1);
else
return (reverse ? -1 : 1);
}
}
/*
* If entries still seem to be the same, check if they are of the same
* type (smbentity is added only if we are doing SID to POSIX ID
* translation where we can have duplicate type/name combinations).
*/
if (nvlist_lookup_boolean_value(lnvl, "smbentity", &lvb) == 0 &&
nvlist_lookup_boolean_value(rnvl, "smbentity", &rvb) == 0 &&
lvb != rvb)
return (lvb < rvb ? -1 : 1);
return (0);
}
static boolean_t
zfs_prop_is_user(unsigned p)
{
return (p == ZFS_PROP_USERUSED || p == ZFS_PROP_USERQUOTA ||
p == ZFS_PROP_USEROBJUSED || p == ZFS_PROP_USEROBJQUOTA);
}
static boolean_t
zfs_prop_is_group(unsigned p)
{
return (p == ZFS_PROP_GROUPUSED || p == ZFS_PROP_GROUPQUOTA ||
p == ZFS_PROP_GROUPOBJUSED || p == ZFS_PROP_GROUPOBJQUOTA);
}
static boolean_t
zfs_prop_is_project(unsigned p)
{
return (p == ZFS_PROP_PROJECTUSED || p == ZFS_PROP_PROJECTQUOTA ||
p == ZFS_PROP_PROJECTOBJUSED || p == ZFS_PROP_PROJECTOBJQUOTA);
}
static inline const char *
us_type2str(unsigned field_type)
{
switch (field_type) {
case USTYPE_PSX_USR:
return ("POSIX User");
case USTYPE_PSX_GRP:
return ("POSIX Group");
case USTYPE_SMB_USR:
return ("SMB User");
case USTYPE_SMB_GRP:
return ("SMB Group");
case USTYPE_PROJ:
return ("Project");
default:
return ("Undefined");
}
}
static int
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
us_cbdata_t *cb = (us_cbdata_t *)arg;
zfs_userquota_prop_t prop = cb->cb_prop;
char *name = NULL;
const char *propname;
char sizebuf[32];
us_node_t *node;
uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
uu_avl_t *avl = cb->cb_avl;
uu_avl_index_t idx;
nvlist_t *props;
us_node_t *n;
zfs_sort_column_t *sortcol = cb->cb_sortcol;
unsigned type = 0;
const char *typestr;
size_t namelen;
size_t typelen;
size_t sizelen;
int typeidx, nameidx, sizeidx;
us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
boolean_t smbentity = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
node = safe_malloc(sizeof (us_node_t));
uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
node->usn_nvl = props;
if (domain != NULL && domain[0] != '\0') {
#ifdef HAVE_IDMAP
/* SMB */
char sid[MAXNAMELEN + 32];
uid_t id;
uint64_t classes;
int err;
directory_error_t e;
smbentity = B_TRUE;
(void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid);
if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
type = USTYPE_SMB_GRP;
err = sid_to_id(sid, B_FALSE, &id);
} else {
type = USTYPE_SMB_USR;
err = sid_to_id(sid, B_TRUE, &id);
}
if (err == 0) {
rid = id;
if (!cb->cb_sid2posix) {
e = directory_name_from_sid(NULL, sid, &name,
&classes);
if (e != NULL)
directory_error_free(e);
if (name == NULL)
name = sid;
}
}
#else
nvlist_free(props);
free(node);
return (-1);
#endif /* HAVE_IDMAP */
}
if (cb->cb_sid2posix || domain == NULL || domain[0] == '\0') {
/* POSIX or -i */
if (zfs_prop_is_group(prop)) {
type = USTYPE_PSX_GRP;
if (!cb->cb_numname) {
struct group *g;
if ((g = getgrgid(rid)) != NULL)
name = g->gr_name;
}
} else if (zfs_prop_is_user(prop)) {
type = USTYPE_PSX_USR;
if (!cb->cb_numname) {
struct passwd *p;
if ((p = getpwuid(rid)) != NULL)
name = p->pw_name;
}
} else {
type = USTYPE_PROJ;
}
}
/*
* Make sure that the type/name combination is unique when doing
* SID to POSIX ID translation (hence changing the type from SMB to
* POSIX).
*/
if (cb->cb_sid2posix &&
nvlist_add_boolean_value(props, "smbentity", smbentity) != 0)
nomem();
/* Calculate/update width of TYPE field */
typestr = us_type2str(type);
typelen = strlen(gettext(typestr));
typeidx = us_field_index("type");
if (typelen > cb->cb_width[typeidx])
cb->cb_width[typeidx] = typelen;
if (nvlist_add_uint32(props, "type", type) != 0)
nomem();
/* Calculate/update width of NAME field */
if ((cb->cb_numname && cb->cb_sid2posix) || name == NULL) {
if (nvlist_add_uint64(props, "name", rid) != 0)
nomem();
namelen = snprintf(NULL, 0, "%u", rid);
} else {
if (nvlist_add_string(props, "name", name) != 0)
nomem();
namelen = strlen(name);
}
nameidx = us_field_index("name");
if (nameidx >= 0 && namelen > cb->cb_width[nameidx])
cb->cb_width[nameidx] = namelen;
/*
* Check if this type/name combination is in the list and update it;
* otherwise add new node to the list.
*/
if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
uu_avl_insert(avl, node, idx);
} else {
nvlist_free(props);
free(node);
node = n;
props = node->usn_nvl;
}
/* Calculate/update width of USED/QUOTA fields */
if (cb->cb_nicenum) {
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTUSED ||
prop == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(space, sizebuf, sizeof (sizebuf));
} else {
zfs_nicenum(space, sizebuf, sizeof (sizebuf));
}
} else {
(void) snprintf(sizebuf, sizeof (sizebuf), "%llu",
(u_longlong_t)space);
}
sizelen = strlen(sizebuf);
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_PROJECTUSED) {
propname = "used";
if (!nvlist_exists(props, "quota"))
(void) nvlist_add_uint64(props, "quota", 0);
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTQUOTA) {
propname = "quota";
if (!nvlist_exists(props, "used"))
(void) nvlist_add_uint64(props, "used", 0);
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA ||
prop == ZFS_PROP_PROJECTOBJQUOTA) {
propname = "objquota";
if (!nvlist_exists(props, "objused"))
(void) nvlist_add_uint64(props, "objused", 0);
} else {
return (-1);
}
sizeidx = us_field_index(propname);
if (sizeidx >= 0 && sizelen > cb->cb_width[sizeidx])
cb->cb_width[sizeidx] = sizelen;
if (nvlist_add_uint64(props, propname, space) != 0)
nomem();
return (0);
}
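/*
 * Illustrative sketch (editor's addition): the snprintf(NULL, 0, ...) idiom
 * used above to size the NAME column.  With a NULL buffer and zero length,
 * snprintf() writes nothing but still returns how many characters the
 * formatted value would need, which is exactly the column width wanted.
 */
#include <stdio.h>
int
main(void)
{
	unsigned int rid = 104857;
	int width = snprintf(NULL, 0, "%u", rid);
	(void) printf("\"%u\" needs %d columns\n", rid, width);
	return (0);
}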
static void
print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, us_node_t *node)
{
nvlist_t *nvl = node->usn_nvl;
char valstr[MAXNAMELEN];
boolean_t first = B_TRUE;
int cfield = 0;
int field;
uint32_t ustype;
/* Check type */
(void) nvlist_lookup_uint32(nvl, "type", &ustype);
if (!(ustype & types))
return;
while ((field = fields[cfield]) != USFIELD_LAST) {
nvpair_t *nvp = NULL;
data_type_t type;
uint32_t val32 = -1;
uint64_t val64 = -1;
const char *strval = "-";
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL)
if (strcmp(nvpair_name(nvp),
us_field_names[field]) == 0)
break;
type = nvp == NULL ? DATA_TYPE_UNKNOWN : nvpair_type(nvp);
switch (type) {
case DATA_TYPE_UINT32:
val32 = fnvpair_value_uint32(nvp);
break;
case DATA_TYPE_UINT64:
val64 = fnvpair_value_uint64(nvp);
break;
case DATA_TYPE_STRING:
strval = fnvpair_value_string(nvp);
break;
case DATA_TYPE_UNKNOWN:
break;
default:
(void) fprintf(stderr, "invalid data type\n");
}
switch (field) {
case USFIELD_TYPE:
if (type == DATA_TYPE_UINT32)
strval = us_type2str(val32);
break;
case USFIELD_NAME:
if (type == DATA_TYPE_UINT64) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
}
break;
case USFIELD_USED:
case USFIELD_QUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_QUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicebytes(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
case USFIELD_OBJUSED:
case USFIELD_OBJQUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_OBJQUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicenum(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
}
if (!first) {
if (scripted)
(void) putchar('\t');
else
(void) fputs(" ", stdout);
}
if (scripted)
(void) fputs(strval, stdout);
else if (field == USFIELD_TYPE || field == USFIELD_NAME)
(void) printf("%-*s", (int)width[field], strval);
else
(void) printf("%*s", (int)width[field], strval);
first = B_FALSE;
cfield++;
}
(void) putchar('\n');
}
static void
print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, boolean_t rmnode, uu_avl_t *avl)
{
us_node_t *node;
const char *col;
int cfield = 0;
int field;
if (!scripted) {
boolean_t first = B_TRUE;
while ((field = fields[cfield]) != USFIELD_LAST) {
col = gettext(us_field_hdr[field]);
if (field == USFIELD_TYPE || field == USFIELD_NAME) {
(void) printf(first ? "%-*s" : " %-*s",
(int)width[field], col);
} else {
(void) printf(first ? "%*s" : " %*s",
(int)width[field], col);
}
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
print_us_node(scripted, parsable, fields, types, width, node);
if (rmnode)
nvlist_free(node->usn_nvl);
}
}
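/*
 * Illustrative sketch (editor's addition): the '*' field-width forms used by
 * print_us_node() and print_us() above.  Passing the width as an int
 * argument lets one format string produce columns sized at run time; the
 * '-' flag left-justifies, as done for the TYPE and NAME columns.
 */
#include <stdio.h>
int
main(void)
{
	int name_w = 12, used_w = 8;
	(void) printf("%-*s %*s\n", name_w, "NAME", used_w, "USED");
	(void) printf("%-*s %*s\n", name_w, "alice", used_w, "1.5G");
	return (0);
}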
static int
zfs_do_userspace(int argc, char **argv)
{
zfs_handle_t *zhp;
zfs_userquota_prop_t p;
uu_avl_pool_t *avl_pool;
uu_avl_t *avl_tree;
uu_avl_walk_t *walk;
char *delim;
char deffields[] = "type,name,used,quota,objused,objquota";
char *ofield = NULL;
char *tfield = NULL;
int cfield = 0;
int fields[256];
int i;
boolean_t scripted = B_FALSE;
boolean_t prtnum = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t sid2posix = B_FALSE;
int ret = 0;
int c;
zfs_sort_column_t *sortcol = NULL;
int types = USTYPE_PSX_USR | USTYPE_SMB_USR;
us_cbdata_t cb;
us_node_t *node;
us_node_t *rmnode;
uu_list_pool_t *listpool;
uu_list_t *list;
uu_avl_index_t idx = 0;
uu_list_index_t idx2 = 0;
if (argc < 2)
usage(B_FALSE);
if (strcmp(argv[0], "groupspace") == 0) {
/* Toggle default group types */
types = USTYPE_PSX_GRP | USTYPE_SMB_GRP;
} else if (strcmp(argv[0], "projectspace") == 0) {
types = USTYPE_PROJ;
prtnum = B_TRUE;
}
while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) {
switch (c) {
case 'n':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'n'\n"));
usage(B_FALSE);
}
prtnum = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'o':
ofield = optarg;
break;
case 's':
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
c == 's' ? B_FALSE : B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid field '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 't'\n"));
usage(B_FALSE);
}
tfield = optarg;
break;
case 'i':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'i'\n"));
usage(B_FALSE);
}
sid2posix = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* Use default output fields if not specified using -o */
if (ofield == NULL)
ofield = deffields;
do {
if ((delim = strchr(ofield, ',')) != NULL)
*delim = '\0';
if ((fields[cfield++] = us_field_index(ofield)) == -1) {
(void) fprintf(stderr, gettext("invalid type '%s' "
"for -o option\n"), ofield);
return (-1);
}
if (delim != NULL)
ofield = delim + 1;
} while (delim != NULL);
fields[cfield] = USFIELD_LAST;
/* Override output types (-t option) */
if (tfield != NULL) {
types = 0;
do {
boolean_t found = B_FALSE;
if ((delim = strchr(tfield, ',')) != NULL)
*delim = '\0';
for (i = 0; i < sizeof (us_type_bits) / sizeof (int);
i++) {
if (strcmp(tfield, us_type_names[i]) == 0) {
found = B_TRUE;
types |= us_type_bits[i];
break;
}
}
if (!found) {
(void) fprintf(stderr, gettext("invalid type "
"'%s' for -t option\n"), tfield);
return (-1);
}
if (delim != NULL)
tfield = delim + 1;
} while (delim != NULL);
}
if ((zhp = zfs_path_to_zhandle(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
if (zfs_get_underlying_type(zhp) != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("operation is only applicable "
"to filesystems and their snapshots\n"));
zfs_close(zhp);
return (1);
}
if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
nomem();
if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
nomem();
/* Always add default sorting columns */
(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
cb.cb_sortcol = sortcol;
cb.cb_numname = prtnum;
cb.cb_nicenum = !parsable;
cb.cb_avl_pool = avl_pool;
cb.cb_avl = avl_tree;
cb.cb_sid2posix = sid2posix;
for (i = 0; i < USFIELD_LAST; i++)
cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
for (p = 0; p < ZFS_NUM_USERQUOTA_PROPS; p++) {
if ((zfs_prop_is_user(p) &&
!(types & (USTYPE_PSX_USR | USTYPE_SMB_USR))) ||
(zfs_prop_is_group(p) &&
!(types & (USTYPE_PSX_GRP | USTYPE_SMB_GRP))) ||
(zfs_prop_is_project(p) && types != USTYPE_PROJ))
continue;
cb.cb_prop = p;
if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0) {
zfs_close(zhp);
return (ret);
}
}
zfs_close(zhp);
/* Sort the list */
if ((node = uu_avl_first(avl_tree)) == NULL)
return (0);
us_populated = B_TRUE;
listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
list = uu_list_create(listpool, NULL, UU_DEFAULT);
uu_list_node_init(node, &node->usn_listnode, listpool);
while (node != NULL) {
rmnode = node;
node = uu_avl_next(avl_tree, node);
uu_avl_remove(avl_tree, rmnode);
if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
uu_list_insert(list, rmnode, idx2);
}
for (node = uu_list_first(list); node != NULL;
node = uu_list_next(list, node)) {
us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
uu_avl_insert(avl_tree, node, idx);
}
uu_list_destroy(list);
uu_list_pool_destroy(listpool);
/* Print and free node nvlist memory */
print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
cb.cb_avl);
zfs_free_sort_columns(sortcol);
/* Clean up the AVL tree */
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(cb.cb_avl, node);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(avl_tree);
uu_avl_pool_destroy(avl_pool);
return (ret);
}
/*
* list [-Hp][-r|-d max] [-o property[,...]] [-s property] ... [-S property]
* [-t type[,...]] [filesystem|volume|snapshot] ...
*
* -H Scripted mode; elide headers and separate columns by tabs
* -p Display values in parsable (literal) format.
* -r Recurse over all children
* -d Limit recursion by depth.
* -o Control which fields to display.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* When given no arguments, list all filesystems in the system.
* Otherwise, list the specified datasets, optionally recursing down them if
* '-r' is specified.
*/
typedef struct list_cbdata {
boolean_t cb_first;
boolean_t cb_literal;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZFS_MAXPROPLEN];
const char *header;
int i;
boolean_t first = B_TRUE;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zfs_prop_column_name(pl->pl_prop);
right_justify = zfs_prop_align_right(pl->pl_prop);
} else {
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, header);
else
(void) printf("%-*s", (int)pl->pl_width, header);
}
(void) printf("\n");
}
/*
* Given a dataset and a list of fields, print out all the properties according
* to the described layout.
*/
static void
print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZFS_MAXPROPLEN];
nvlist_t *userprops = zfs_get_user_props(zhp);
nvlist_t *propval;
const char *propstr;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
if (cb->cb_scripted)
(void) putchar('\t');
else
(void) fputs(" ", stdout);
} else {
first = B_FALSE;
}
if (pl->pl_prop == ZFS_PROP_NAME) {
(void) strlcpy(property, zfs_get_name(zhp),
sizeof (property));
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (pl->pl_prop != ZPROP_USERPROP) {
if (zfs_prop_get(zhp, pl->pl_prop, property,
sizeof (property), NULL, NULL, 0,
cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else if (zfs_prop_written(pl->pl_user_prop)) {
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else {
if (nvlist_lookup_nvlist(userprops,
pl->pl_user_prop, &propval) != 0)
propstr = "-";
else
propstr = fnvlist_lookup_string(propval,
ZPROP_VALUE);
right_justify = B_FALSE;
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, propstr);
else
(void) printf("%-*s", (int)pl->pl_width, propstr);
}
(void) putchar('\n');
}
/*
* Generic callback function to list a dataset or snapshot.
*/
static int
list_callback(zfs_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
if (cbp->cb_first) {
if (!cbp->cb_scripted)
print_header(cbp);
cbp->cb_first = B_FALSE;
}
print_dataset(zhp, cbp);
return (0);
}
static int
zfs_do_list(int argc, char **argv)
{
int c;
char default_fields[] =
"name,used,available,referenced,mountpoint";
int types = ZFS_TYPE_DATASET;
boolean_t types_specified = B_FALSE;
char *fields = default_fields;
list_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
zfs_sort_column_t *sortcol = NULL;
int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
switch (c) {
case 'o':
fields = optarg;
break;
case 'p':
cb.cb_literal = B_TRUE;
flags |= ZFS_ITER_LITERAL_PROPS;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 's':
if (zfs_add_sort_column(&sortcol, optarg,
B_FALSE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
types = 0;
types_specified = B_TRUE;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const type_subopts[] = {
"filesystem", "volume",
"snapshot", "snap",
"bookmark",
"all" };
static const int type_types[] = {
ZFS_TYPE_FILESYSTEM, ZFS_TYPE_VOLUME,
ZFS_TYPE_SNAPSHOT, ZFS_TYPE_SNAPSHOT,
ZFS_TYPE_BOOKMARK,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK };
for (c = 0; c < ARRAY_SIZE(type_subopts); ++c)
if (strcmp(tok, type_subopts[c]) == 0) {
types |= type_types[c];
goto found3;
}
(void) fprintf(stderr,
gettext("invalid type '%s'\n"), tok);
usage(B_FALSE);
found3:;
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/*
* If we are only going to list snapshot names and sort by name or
* by createtxg, then we can use a faster version.
*/
if (strcmp(fields, "name") == 0 &&
(zfs_sort_only_by_name(sortcol) ||
zfs_sort_only_by_createtxg(sortcol))) {
flags |= ZFS_ITER_SIMPLE;
}
/*
* If "-o space" and no types were specified, don't display snapshots.
*/
if (strcmp(fields, "space") == 0 && types_specified == B_FALSE)
types &= ~ZFS_TYPE_SNAPSHOT;
/*
* Handle users who want to list all snapshots or bookmarks
* of the current dataset (ex. 'zfs list -t snapshot <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 0 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
/*
* If the user specifies '-o all', zprop_get_list() doesn't
* normally include the name of the dataset. For 'zfs list', we always
* want this property to be first.
*/
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
cb.cb_first = B_TRUE;
ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
limit, list_callback, &cb);
zprop_free_list(cb.cb_proplist);
zfs_free_sort_columns(sortcol);
if (ret == 0 && cb.cb_first && !cb.cb_scripted)
(void) fprintf(stderr, gettext("no datasets available\n"));
return (ret);
}
/*
* zfs rename [-fu] <fs | snap | vol> <fs | snap | vol>
* zfs rename [-f] -p <fs | vol> <fs | vol>
* zfs rename [-u] -r <snap> <snap>
*
* Renames the given dataset to another of the same type.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
* The '-u' flag prevents file systems from being remounted during rename.
*/
static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
renameflags_t flags = { 0 };
int c;
int ret = 0;
int types;
boolean_t parents = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "pruf")) != -1) {
switch (c) {
case 'p':
parents = B_TRUE;
break;
case 'r':
flags.recursive = B_TRUE;
break;
case 'u':
flags.nounmount = B_TRUE;
break;
case 'f':
flags.forceunmount = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (flags.recursive && parents) {
(void) fprintf(stderr, gettext("-p and -r options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.nounmount && parents) {
(void) fprintf(stderr, gettext("-u and -p options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.recursive && strchr(argv[0], '@') == 0) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if (flags.nounmount)
types = ZFS_TYPE_FILESYSTEM;
else if (parents)
types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
else
types = ZFS_TYPE_DATASET;
if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL)
return (1);
/* If we were asked and the name looks good, try to create ancestors. */
if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) &&
zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
return (1);
}
ret = (zfs_rename(zhp, argv[1], flags) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs promote <fs>
*
* Promotes the given clone fs to be the parent
*/
static int
zfs_do_promote(int argc, char **argv)
{
zfs_handle_t *zhp;
int ret = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing clone filesystem"
" argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
ret = (zfs_promote(zhp) != 0);
zfs_close(zhp);
return (ret);
}
static int
zfs_do_redact(int argc, char **argv)
{
char *snap = NULL;
char *bookname = NULL;
char **rsnaps = NULL;
int numrsnaps = 0;
argv++;
argc--;
if (argc < 3) {
(void) fprintf(stderr, gettext("too few arguments\n"));
usage(B_FALSE);
}
snap = argv[0];
bookname = argv[1];
rsnaps = argv + 2;
numrsnaps = argc - 2;
nvlist_t *rsnapnv = fnvlist_alloc();
for (int i = 0; i < numrsnaps; i++) {
fnvlist_add_boolean(rsnapnv, rsnaps[i]);
}
int err = lzc_redact(snap, bookname, rsnapnv);
fnvlist_free(rsnapnv);
switch (err) {
case 0:
break;
case ENOENT:
(void) fprintf(stderr,
gettext("provided snapshot %s does not exist\n"), snap);
break;
case EEXIST:
(void) fprintf(stderr, gettext("specified redaction bookmark "
"(%s) provided already exists\n"), bookname);
break;
case ENAMETOOLONG:
(void) fprintf(stderr, gettext("provided bookmark name cannot "
"be used, final name would be too long\n"));
break;
case E2BIG:
(void) fprintf(stderr, gettext("too many redaction snapshots "
"specified\n"));
break;
case EINVAL:
if (strchr(bookname, '#') != NULL)
(void) fprintf(stderr, gettext(
"redaction bookmark name must not contain '#'\n"));
else
(void) fprintf(stderr, gettext(
"redaction snapshot must be descendent of "
"snapshot being redacted\n"));
break;
case EALREADY:
(void) fprintf(stderr, gettext("attempted to redact redacted "
"dataset or with respect to redacted dataset\n"));
break;
case ENOTSUP:
(void) fprintf(stderr, gettext("redaction bookmarks feature "
"not enabled\n"));
break;
case EXDEV:
(void) fprintf(stderr, gettext("potentially invalid redaction "
"snapshot; full dataset names required\n"));
break;
default:
(void) fprintf(stderr, gettext("internal error: %s\n"),
strerror(errno));
}
return (err);
}
/*
* zfs rollback [-rRf] <snapshot>
*
* -r Delete any intervening snapshots before doing rollback
* -R Delete any snapshots and their clones
* -f ignored for backwards compatibility
*
* Given a filesystem, rollback to a specific snapshot, discarding any changes
* since then and making it the active dataset. If more recent snapshots exist,
* the command will complain unless the '-r' flag is given.
*/
typedef struct rollback_cbdata {
uint64_t cb_create;
uint8_t cb_younger_ds_printed;
boolean_t cb_first;
int cb_doclones;
char *cb_target;
int cb_error;
boolean_t cb_recurse;
} rollback_cbdata_t;
static int
rollback_check_dependent(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
if (cbp->cb_first && cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot rollback to "
"'%s': clones of previous snapshots exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-R' to "
"force deletion of the following clones and "
"dependents:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
zfs_close(zhp);
return (0);
}
/*
* Report some snapshots/bookmarks more recent than the one specified.
* Used when '-r' is not specified. When '-r' is given, the clones of each
* younger snapshot are reported through rollback_check_dependent() instead
* of being listed here.
*/
static int
rollback_check(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
/*
* Max number of younger snapshots and/or bookmarks to display before
* we stop the iteration.
*/
const uint8_t max_younger = 32;
if (cbp->cb_doclones) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
if (cbp->cb_first && !cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot "
"rollback to '%s': more recent snapshots "
"or bookmarks exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-r' to "
"force deletion of the following "
"snapshots and bookmarks:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
if (cbp->cb_recurse) {
if (zfs_iter_dependents(zhp, B_TRUE,
rollback_check_dependent, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
} else {
(void) fprintf(stderr, "%s\n",
zfs_get_name(zhp));
cbp->cb_younger_ds_printed++;
}
}
zfs_close(zhp);
if (cbp->cb_younger_ds_printed == max_younger) {
/*
* This non-recursive rollback is going to fail due to the
* presence of snapshots and/or bookmarks that are younger than
* the rollback target.
* We printed some of the offending objects; now we stop the
* zfs_iter_snapshots/bookmarks iteration so we can fail fast and
* avoid walking the rest of the younger objects.
*/
(void) fprintf(stderr, gettext("Output limited to %d "
"snapshots/bookmarks\n"), max_younger);
return (-1);
}
return (0);
}
static int
zfs_do_rollback(int argc, char **argv)
{
int ret = 0;
int c;
boolean_t force = B_FALSE;
rollback_cbdata_t cb = { 0 };
zfs_handle_t *zhp, *snap;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
char *delim;
uint64_t min_txg = 0;
/* check options */
while ((c = getopt(argc, argv, "rRf")) != -1) {
switch (c) {
case 'r':
cb.cb_recurse = 1;
break;
case 'R':
cb.cb_recurse = 1;
cb.cb_doclones = 1;
break;
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* open the snapshot */
if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
/* open the parent dataset */
(void) strlcpy(parentname, argv[0], sizeof (parentname));
verify((delim = strrchr(parentname, '@')) != NULL);
*delim = '\0';
if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) {
zfs_close(snap);
return (1);
}
/*
* Check for more recent snapshots and/or clones based on the presence
* of '-r' and '-R'.
*/
cb.cb_target = argv[0];
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
cb.cb_first = B_TRUE;
cb.cb_error = 0;
if (cb.cb_create > 0)
min_txg = cb.cb_create;
if ((ret = zfs_iter_snapshots(zhp, B_FALSE, rollback_check, &cb,
min_txg, 0)) != 0)
goto out;
if ((ret = zfs_iter_bookmarks(zhp, rollback_check, &cb)) != 0)
goto out;
if ((ret = cb.cb_error) != 0)
goto out;
/*
* Rollback parent to the given snapshot.
*/
ret = zfs_rollback(zhp, snap, force);
out:
zfs_close(snap);
zfs_close(zhp);
if (ret == 0)
return (0);
else
return (1);
}
/*
* zfs set property=value ... { fs | snap | vol } ...
*
* Sets the given properties for all datasets specified on the command line.
*/
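/*
* Illustrative invocation (editor's example, not taken from this source):
*
*	zfs set compression=lz4 atime=off tank/home tank/projects
*/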
static int
set_callback(zfs_handle_t *zhp, void *data)
{
nvlist_t *props = data;
if (zfs_prop_set_list(zhp, props) != 0) {
switch (libzfs_errno(g_zfs)) {
case EZFS_MOUNTFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to remount filesystem\n"));
break;
case EZFS_SHARENFSFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to reshare filesystem\n"));
break;
}
return (1);
}
return (0);
}
static int
zfs_do_set(int argc, char **argv)
{
nvlist_t *props = NULL;
int ds_start = -1; /* argv idx of first dataset arg */
int ret = 0;
int i;
/* check for options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing arguments\n"));
usage(B_FALSE);
}
if (argc < 3) {
if (strchr(argv[1], '=') == NULL) {
(void) fprintf(stderr, gettext("missing property=value "
"argument(s)\n"));
} else {
(void) fprintf(stderr, gettext("missing dataset "
"name(s)\n"));
}
usage(B_FALSE);
}
/* validate argument order: prop=val args followed by dataset args */
for (i = 1; i < argc; i++) {
if (strchr(argv[i], '=') != NULL) {
if (ds_start > 0) {
/* out-of-order prop=val argument */
(void) fprintf(stderr, gettext("invalid "
"argument order\n"));
usage(B_FALSE);
}
} else if (ds_start < 0) {
ds_start = i;
}
}
if (ds_start < 0) {
(void) fprintf(stderr, gettext("missing dataset name(s)\n"));
usage(B_FALSE);
}
/* Populate a list of property settings */
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 1; i < ds_start; i++) {
if (!parseprop(props, argv[i])) {
ret = -1;
goto error;
}
}
ret = zfs_for_each(argc - ds_start, argv + ds_start, 0,
ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, props);
error:
nvlist_free(props);
return (ret);
}
typedef struct snap_cbdata {
nvlist_t *sd_nvl;
boolean_t sd_recursive;
const char *sd_snapname;
} snap_cbdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snap_cbdata_t *sd = arg;
char *name;
int rv = 0;
int error;
if (sd->sd_recursive &&
zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) != 0) {
zfs_close(zhp);
return (0);
}
error = asprintf(&name, "%s@%s", zfs_get_name(zhp), sd->sd_snapname);
if (error == -1)
nomem();
fnvlist_add_boolean(sd->sd_nvl, name);
free(name);
if (sd->sd_recursive)
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
zfs_close(zhp);
return (rv);
}
/*
* zfs snapshot [-r] [-o prop=value] ... <fs@snap>
*
* Creates a snapshot with the given name. While functionally equivalent to
* 'zfs create', it is a separate command to differentiate intent.
*/
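/*
* Illustrative invocation (editor's example, not taken from this source;
* the -o property shown is an arbitrary user property):
*
*	zfs snapshot -r -o com.example:reason=nightly tank@2024-01-01
*/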
static int
zfs_do_snapshot(int argc, char **argv)
{
int ret = 0;
int c;
nvlist_t *props;
snap_cbdata_t sd = { 0 };
boolean_t multiple_snaps = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "ro:")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(sd.sd_nvl);
nvlist_free(props);
return (1);
}
break;
case 'r':
sd.sd_recursive = B_TRUE;
multiple_snaps = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
goto usage;
}
if (argc > 1)
multiple_snaps = B_TRUE;
for (; argc > 0; argc--, argv++) {
char *atp;
zfs_handle_t *zhp;
atp = strchr(argv[0], '@');
if (atp == NULL)
goto usage;
*atp = '\0';
sd.sd_snapname = atp + 1;
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
goto usage;
if (zfs_snapshot_cb(zhp, &sd) != 0)
goto usage;
}
ret = zfs_snapshot_nvl(g_zfs, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
nvlist_free(props);
if (ret != 0 && multiple_snaps)
(void) fprintf(stderr, gettext("no snapshots were created\n"));
return (ret != 0);
usage:
nvlist_free(sd.sd_nvl);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Array of prefixes to exclude –
* a linear search, even if executed for each dataset,
* is plenty good enough.
*/
typedef struct zfs_send_exclude_arg {
size_t count;
const char **list;
} zfs_send_exclude_arg_t;
static boolean_t
zfs_do_send_exclude(zfs_handle_t *zhp, void *context)
{
zfs_send_exclude_arg_t *excludes = context;
const char *name = zfs_get_name(zhp);
for (size_t i = 0; i < excludes->count; ++i) {
size_t len = strlen(excludes->list[i]);
if (strncmp(name, excludes->list[i], len) == 0 &&
memchr("/@", name[len], sizeof ("/@")))
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Send a backup stream to stdout.
*/
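/*
* Illustrative invocations (editor's examples, not taken from this source):
*
*	zfs send -i tank/home@old tank/home@new | ssh backup zfs receive -d pool
*	zfs send -R -X tank/home/scratch tank/home@nightly > home.zstream
*
* Note that -X (--exclude) is only accepted together with -R, as checked
* below.
*/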
static int
zfs_do_send(int argc, char **argv)
{
char *fromname = NULL;
char *toname = NULL;
char *resume_token = NULL;
char *cp;
zfs_handle_t *zhp;
sendflags_t flags = { 0 };
int c, err;
nvlist_t *dbgnv = NULL;
char *redactbook = NULL;
zfs_send_exclude_arg_t excludes = { 0 };
struct option long_options[] = {
{"replicate", no_argument, NULL, 'R'},
{"skip-missing", no_argument, NULL, 's'},
{"redact", required_argument, NULL, 'd'},
{"props", no_argument, NULL, 'p'},
{"parsable", no_argument, NULL, 'P'},
{"dedup", no_argument, NULL, 'D'},
{"verbose", no_argument, NULL, 'v'},
{"dryrun", no_argument, NULL, 'n'},
{"large-block", no_argument, NULL, 'L'},
{"embed", no_argument, NULL, 'e'},
{"resume", required_argument, NULL, 't'},
{"compressed", no_argument, NULL, 'c'},
{"raw", no_argument, NULL, 'w'},
{"backup", no_argument, NULL, 'b'},
{"holds", no_argument, NULL, 'h'},
{"saved", no_argument, NULL, 'S'},
{"exclude", required_argument, NULL, 'X'},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":i:I:RsDpvnPLeht:cwbd:SX:",
long_options, NULL)) != -1) {
switch (c) {
case 'X':
for (char *ds; (ds = strsep(&optarg, ",")) != NULL; ) {
if (!zfs_name_valid(ds, ZFS_TYPE_DATASET) ||
strchr(ds, '/') == NULL) {
(void) fprintf(stderr, gettext("-X %s: "
"not a valid non-root dataset name"
".\n"), ds);
usage(B_FALSE);
}
excludes.list = safe_realloc(excludes.list,
sizeof (char *) * (excludes.count + 1));
excludes.list[excludes.count++] = ds;
}
break;
case 'i':
if (fromname)
usage(B_FALSE);
fromname = optarg;
break;
case 'I':
if (fromname)
usage(B_FALSE);
fromname = optarg;
flags.doall = B_TRUE;
break;
case 'R':
flags.replicate = B_TRUE;
break;
case 's':
flags.skipmissing = B_TRUE;
break;
case 'd':
redactbook = optarg;
break;
case 'p':
flags.props = B_TRUE;
break;
case 'b':
flags.backup = B_TRUE;
break;
case 'h':
flags.holds = B_TRUE;
break;
case 'P':
flags.parsable = B_TRUE;
break;
case 'v':
flags.verbosity++;
flags.progress = B_TRUE;
break;
case 'D':
(void) fprintf(stderr,
gettext("WARNING: deduplicated send is no "
"longer supported. A regular,\n"
"non-deduplicated stream will be generated.\n\n"));
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'L':
flags.largeblock = B_TRUE;
break;
case 'e':
flags.embed_data = B_TRUE;
break;
case 't':
resume_token = optarg;
break;
case 'c':
flags.compress = B_TRUE;
break;
case 'w':
flags.raw = B_TRUE;
flags.compress = B_TRUE;
flags.embed_data = B_TRUE;
flags.largeblock = B_TRUE;
break;
case 'S':
flags.saved = B_TRUE;
break;
case ':':
/*
* If a required argument was not supplied, optopt contains the
* value that would normally lead us into the appropriate case
* statement.  If it is larger than UINT8_MAX, this must be a
* long option and we should look at argv to get the string.
* Otherwise it is just the short-option character, so use it
* directly.
*/
if (optopt <= UINT8_MAX) {
(void) fprintf(stderr,
gettext("missing argument for '%c' "
"option\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("missing argument for '%s' "
"option\n"), argv[optind - 1]);
}
usage(B_FALSE);
break;
case '?':
default:
/*
* If an invalid flag was passed, optopt contains the
* character if it was a short flag, or 0 if it was a
* longopt.
*/
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
if (flags.parsable && flags.verbosity == 0)
flags.verbosity = 1;
if (excludes.count > 0 && !flags.replicate) {
(void) fprintf(stderr, gettext("Cannot specify "
"dataset exclusion (-X) on a non-recursive "
"send.\n"));
return (1);
}
argc -= optind;
argv += optind;
if (resume_token != NULL) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.backup || flags.holds ||
flags.saved || redactbook != NULL) {
(void) fprintf(stderr,
gettext("invalid flags combined with -t\n"));
usage(B_FALSE);
}
if (argc > 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc < 1) {
(void) fprintf(stderr,
gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
if (flags.saved) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.doall || flags.backup ||
flags.holds || flags.largeblock || flags.embed_data ||
flags.compress || flags.raw || redactbook != NULL) {
(void) fprintf(stderr, gettext("incompatible flags "
"combined with saved send flag\n"));
usage(B_FALSE);
}
if (strchr(argv[0], '@') != NULL) {
(void) fprintf(stderr, gettext("saved send must "
"specify the dataset with partially-received "
"state\n"));
usage(B_FALSE);
}
}
if (flags.raw && redactbook != NULL) {
(void) fprintf(stderr,
gettext("Error: raw sends may not be redacted.\n"));
return (1);
}
if (!flags.dryrun && isatty(STDOUT_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Stream can not be written to a terminal.\n"
"You must redirect standard output.\n"));
return (1);
}
if (flags.saved) {
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
err = zfs_send_saved(zhp, &flags, STDOUT_FILENO,
resume_token);
zfs_close(zhp);
return (err != 0);
} else if (resume_token != NULL) {
return (zfs_send_resume(g_zfs, &flags, STDOUT_FILENO,
resume_token));
}
if (flags.skipmissing && !flags.replicate) {
(void) fprintf(stderr,
gettext("skip-missing flag can only be used in "
"conjunction with replicate\n"));
usage(B_FALSE);
}
/*
* For everything except -R and -I, use the new, cleaner code path.
*/
if (!(flags.replicate || flags.doall)) {
char frombuf[ZFS_MAX_DATASET_NAME_LEN];
if (fromname != NULL && (strchr(fromname, '#') == NULL &&
strchr(fromname, '@') == NULL)) {
/*
* Neither a bookmark nor a snapshot was specified.  Print a
* warning and assume a snapshot.
*/
(void) fprintf(stderr, "Warning: incremental source "
"didn't specify type, assuming snapshot. Use '@' "
"or '#' prefix to avoid ambiguity.\n");
(void) snprintf(frombuf, sizeof (frombuf), "@%s",
fromname);
fromname = frombuf;
}
if (fromname != NULL &&
(fromname[0] == '#' || fromname[0] == '@')) {
/*
* Incremental source name begins with # or @.
* Default to same fs as target.
*/
char tmpbuf[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(tmpbuf, fromname, sizeof (tmpbuf));
(void) strlcpy(frombuf, argv[0], sizeof (frombuf));
cp = strchr(frombuf, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(frombuf, tmpbuf, sizeof (frombuf));
fromname = frombuf;
}
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
err = zfs_send_one(zhp, fromname, STDOUT_FILENO, &flags,
redactbook);
zfs_close(zhp);
return (err != 0);
}
if (fromname != NULL && strchr(fromname, '#')) {
(void) fprintf(stderr,
gettext("Error: multiple snapshots cannot be "
"sent from a bookmark.\n"));
return (1);
}
if (redactbook != NULL) {
(void) fprintf(stderr, gettext("Error: multiple snapshots "
"cannot be sent redacted.\n"));
return (1);
}
if ((cp = strchr(argv[0], '@')) == NULL) {
(void) fprintf(stderr, gettext("Error: "
"Unsupported flag with filesystem or bookmark.\n"));
return (1);
}
*cp = '\0';
toname = cp + 1;
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
/*
* If they specified the full path to the snapshot, chop off
* everything except the short name of the snapshot, but
* special-case it when they specify the origin.
*/
if (fromname && (cp = strchr(fromname, '@')) != NULL) {
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
origin, sizeof (origin), &src, NULL, 0, B_FALSE);
if (strcmp(origin, fromname) == 0) {
fromname = NULL;
flags.fromorigin = B_TRUE;
} else {
*cp = '\0';
if (cp != fromname && strcmp(argv[0], fromname)) {
(void) fprintf(stderr,
gettext("incremental source must be "
"in same filesystem\n"));
usage(B_FALSE);
}
fromname = cp + 1;
if (strchr(fromname, '@') || strchr(fromname, '/')) {
(void) fprintf(stderr,
gettext("invalid incremental source\n"));
usage(B_FALSE);
}
}
}
if (flags.replicate && fromname == NULL)
flags.doall = B_TRUE;
err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO,
excludes.count > 0 ? zfs_do_send_exclude : NULL,
&excludes, flags.verbosity >= 3 ? &dbgnv : NULL);
if (flags.verbosity >= 3 && dbgnv != NULL) {
/*
* dump_nvlist prints to stdout, but that's been
* redirected to a file. Make it print to stderr
* instead.
*/
(void) dup2(STDERR_FILENO, STDOUT_FILENO);
dump_nvlist(dbgnv, 0);
nvlist_free(dbgnv);
}
zfs_close(zhp);
free(excludes.list);
return (err != 0);
}
/*
* Restore a backup stream from stdin.
*/
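/*
* Illustrative invocations (editor's examples, not taken from this source):
*
*	zfs receive -Fuv tank/restored < home.zstream
*	zfs receive -A tank/restored       (abort an interrupted resumable
*	    receive, handled by the -A branch below)
*/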
static int
zfs_do_receive(int argc, char **argv)
{
int c, err = 0;
recvflags_t flags = { 0 };
boolean_t abort_resumable = B_FALSE;
nvlist_t *props;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":o:x:dehMnuvFsAc")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'x':
if (!parsepropname(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'd':
if (flags.istail) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.isprefix = B_TRUE;
break;
case 'e':
if (flags.isprefix) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.istail = B_TRUE;
break;
case 'h':
flags.skipholds = B_TRUE;
break;
case 'M':
flags.forceunmount = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'u':
flags.nomount = B_TRUE;
break;
case 'v':
flags.verbose = B_TRUE;
break;
case 's':
flags.resumable = B_TRUE;
break;
case 'F':
flags.force = B_TRUE;
break;
case 'A':
abort_resumable = B_TRUE;
break;
case 'c':
flags.heal = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* zfs recv -e (use "tail" name) implies -d (remove dataset "head") */
if (flags.istail)
flags.isprefix = B_TRUE;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (abort_resumable) {
if (flags.isprefix || flags.istail || flags.dryrun ||
flags.resumable || flags.nomount) {
(void) fprintf(stderr, gettext("invalid option\n"));
usage(B_FALSE);
}
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(namebuf, sizeof (namebuf),
"%s/%%recv", argv[0]);
if (zfs_dataset_exists(g_zfs, namebuf,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) {
zfs_handle_t *zhp = zfs_open(g_zfs,
namebuf, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(props);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
} else {
zfs_handle_t *zhp = zfs_open(g_zfs,
argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) ||
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == -1) {
(void) fprintf(stderr,
gettext("'%s' does not have any "
"resumable receive state to abort\n"),
argv[0]);
nvlist_free(props);
zfs_close(zhp);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
}
nvlist_free(props);
return (err != 0);
}
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Backup stream can not be read "
"from a terminal.\n"
"You must redirect standard input.\n"));
nvlist_free(props);
return (1);
}
err = zfs_receive(g_zfs, argv[0], props, &flags, STDIN_FILENO, NULL);
nvlist_free(props);
return (err != 0);
}
/*
* allow/unallow stuff
*/
/* copied from zfs/sys/dsl_deleg.h */
#define ZFS_DELEG_PERM_CREATE "create"
#define ZFS_DELEG_PERM_DESTROY "destroy"
#define ZFS_DELEG_PERM_SNAPSHOT "snapshot"
#define ZFS_DELEG_PERM_ROLLBACK "rollback"
#define ZFS_DELEG_PERM_CLONE "clone"
#define ZFS_DELEG_PERM_PROMOTE "promote"
#define ZFS_DELEG_PERM_RENAME "rename"
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
#define ZFS_DELEG_PERM_USERQUOTA "userquota"
#define ZFS_DELEG_PERM_GROUPQUOTA "groupquota"
#define ZFS_DELEG_PERM_USERUSED "userused"
#define ZFS_DELEG_PERM_GROUPUSED "groupused"
#define ZFS_DELEG_PERM_USEROBJQUOTA "userobjquota"
#define ZFS_DELEG_PERM_GROUPOBJQUOTA "groupobjquota"
#define ZFS_DELEG_PERM_USEROBJUSED "userobjused"
#define ZFS_DELEG_PERM_GROUPOBJUSED "groupobjused"
#define ZFS_DELEG_PERM_HOLD "hold"
#define ZFS_DELEG_PERM_RELEASE "release"
#define ZFS_DELEG_PERM_DIFF "diff"
#define ZFS_DELEG_PERM_BOOKMARK "bookmark"
#define ZFS_DELEG_PERM_LOAD_KEY "load-key"
#define ZFS_DELEG_PERM_CHANGE_KEY "change-key"
#define ZFS_DELEG_PERM_PROJECTUSED "projectused"
#define ZFS_DELEG_PERM_PROJECTQUOTA "projectquota"
#define ZFS_DELEG_PERM_PROJECTOBJUSED "projectobjused"
#define ZFS_DELEG_PERM_PROJECTOBJQUOTA "projectobjquota"
#define ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE
static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
{ ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW },
{ ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE },
{ ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE },
{ ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY },
{ ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF},
{ ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD },
{ ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT },
{ ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE },
{ ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE },
{ ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE },
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
{ ZFS_DELEG_PERM_LOAD_KEY, ZFS_DELEG_NOTE_LOAD_KEY },
{ ZFS_DELEG_PERM_CHANGE_KEY, ZFS_DELEG_NOTE_CHANGE_KEY },
{ ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA },
{ ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED },
{ ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP },
{ ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA },
{ ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED },
{ ZFS_DELEG_PERM_USEROBJQUOTA, ZFS_DELEG_NOTE_USEROBJQUOTA },
{ ZFS_DELEG_PERM_USEROBJUSED, ZFS_DELEG_NOTE_USEROBJUSED },
{ ZFS_DELEG_PERM_GROUPOBJQUOTA, ZFS_DELEG_NOTE_GROUPOBJQUOTA },
{ ZFS_DELEG_PERM_GROUPOBJUSED, ZFS_DELEG_NOTE_GROUPOBJUSED },
{ ZFS_DELEG_PERM_PROJECTUSED, ZFS_DELEG_NOTE_PROJECTUSED },
{ ZFS_DELEG_PERM_PROJECTQUOTA, ZFS_DELEG_NOTE_PROJECTQUOTA },
{ ZFS_DELEG_PERM_PROJECTOBJUSED, ZFS_DELEG_NOTE_PROJECTOBJUSED },
{ ZFS_DELEG_PERM_PROJECTOBJQUOTA, ZFS_DELEG_NOTE_PROJECTOBJQUOTA },
{ NULL, ZFS_DELEG_NOTE_NONE }
};
/* permission structure */
typedef struct deleg_perm {
zfs_deleg_who_type_t dp_who_type;
const char *dp_name;
boolean_t dp_local;
boolean_t dp_descend;
} deleg_perm_t;
/* AVL node wrapping a single delegated permission */
typedef struct deleg_perm_node {
deleg_perm_t dpn_perm;
uu_avl_node_t dpn_avl_node;
} deleg_perm_node_t;
typedef struct fs_perm fs_perm_t;
/* permissions set */
typedef struct who_perm {
zfs_deleg_who_type_t who_type;
const char *who_name; /* id */
char who_ug_name[256]; /* user/group name */
fs_perm_t *who_fsperm; /* uplink */
uu_avl_t *who_deleg_perm_avl; /* permissions */
} who_perm_t;
/* AVL node wrapping the permissions held by one "who" entity */
typedef struct who_perm_node {
who_perm_t who_perm;
uu_avl_node_t who_avl_node;
} who_perm_node_t;
typedef struct fs_perm_set fs_perm_set_t;
/* fs permissions */
struct fs_perm {
const char *fsp_name;
uu_avl_t *fsp_sc_avl; /* sets,create */
uu_avl_t *fsp_uge_avl; /* user,group,everyone */
fs_perm_set_t *fsp_set; /* uplink */
};
/* list node wrapping the permissions defined on one filesystem */
typedef struct fs_perm_node {
fs_perm_t fspn_fsperm;
uu_avl_t *fspn_avl;
uu_list_node_t fspn_list_node;
} fs_perm_node_t;
/* top level structure */
struct fs_perm_set {
uu_list_pool_t *fsps_list_pool;
uu_list_t *fsps_list; /* list of fs_perms */
uu_avl_pool_t *fsps_named_set_avl_pool;
uu_avl_pool_t *fsps_who_perm_avl_pool;
uu_avl_pool_t *fsps_deleg_perm_avl_pool;
};
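/*
* Editor's summary of how the structures above nest (derived from the
* definitions; not part of the original source):
*
*	fs_perm_set
*	    fsps_list: one fs_perm_node per filesystem
*	        fsp_sc_avl:  who_perm nodes for named sets and create-time perms
*	        fsp_uge_avl: who_perm nodes for user/group/everyone entries
*	            who_deleg_perm_avl: deleg_perm nodes, one per permission
*/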
static inline const char *
deleg_perm_type(zfs_deleg_note_t note)
{
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
case ZFS_DELEG_NOTE_GROUPUSED:
case ZFS_DELEG_NOTE_USERPROP:
case ZFS_DELEG_NOTE_USERQUOTA:
case ZFS_DELEG_NOTE_USERUSED:
case ZFS_DELEG_NOTE_USEROBJQUOTA:
case ZFS_DELEG_NOTE_USEROBJUSED:
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
case ZFS_DELEG_NOTE_GROUPOBJUSED:
case ZFS_DELEG_NOTE_PROJECTUSED:
case ZFS_DELEG_NOTE_PROJECTQUOTA:
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
/* other */
return (gettext("other"));
default:
return (gettext("subcommand"));
}
}
static int
who_type2weight(zfs_deleg_who_type_t who_type)
{
int res;
switch (who_type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
res = 0;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
res = 1;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
res = 2;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
res = 3;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
res = 4;
break;
default:
res = -1;
}
return (res);
}
static int
who_perm_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
const who_perm_node_t *l = larg;
const who_perm_node_t *r = rarg;
zfs_deleg_who_type_t ltype = l->who_perm.who_type;
zfs_deleg_who_type_t rtype = r->who_perm.who_type;
int lweight = who_type2weight(ltype);
int rweight = who_type2weight(rtype);
int res = lweight - rweight;
if (res == 0)
res = strncmp(l->who_perm.who_name, r->who_perm.who_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
static int
deleg_perm_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
const deleg_perm_node_t *l = larg;
const deleg_perm_node_t *r = rarg;
int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
static inline void
fs_perm_set_init(fs_perm_set_t *fspset)
{
memset(fspset, 0, sizeof (fs_perm_set_t));
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
NULL, UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
"named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
"who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
"deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
== NULL)
nomem();
}
static inline void fs_perm_fini(fs_perm_t *);
static inline void who_perm_fini(who_perm_t *);
static inline void
fs_perm_set_fini(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
while (node != NULL) {
fs_perm_node_t *next_node =
uu_list_next(fspset->fsps_list, node);
fs_perm_t *fsperm = &node->fspn_fsperm;
fs_perm_fini(fsperm);
uu_list_remove(fspset->fsps_list, node);
free(node);
node = next_node;
}
uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
}
static inline void
deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type,
const char *name)
{
deleg_perm->dp_who_type = type;
deleg_perm->dp_name = name;
}
static inline void
who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
zfs_deleg_who_type_t type, const char *name)
{
uu_avl_pool_t *pool;
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
memset(who_perm, 0, sizeof (who_perm_t));
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
who_perm->who_type = type;
who_perm->who_name = name;
who_perm->who_fsperm = fsperm;
}
static inline void
who_perm_fini(who_perm_t *who_perm)
{
deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
while (node != NULL) {
deleg_perm_node_t *next_node =
uu_avl_next(who_perm->who_deleg_perm_avl, node);
uu_avl_remove(who_perm->who_deleg_perm_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(who_perm->who_deleg_perm_avl);
}
static inline void
fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
{
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
memset(fsperm, 0, sizeof (fs_perm_t));
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
fsperm->fsp_set = fspset;
fsperm->fsp_name = fsname;
}
static inline void
fs_perm_fini(fs_perm_t *fsperm)
{
who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_sc_avl, node);
free(node);
node = next_node;
}
node = uu_avl_first(fsperm->fsp_uge_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_uge_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(fsperm->fsp_sc_avl);
uu_avl_destroy(fsperm->fsp_uge_avl);
}
static void
set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
zfs_deleg_who_type_t who_type, const char *name, char locality)
{
uu_avl_index_t idx = 0;
deleg_perm_node_t *found_node = NULL;
deleg_perm_t *deleg_perm = &node->dpn_perm;
deleg_perm_init(deleg_perm, who_type, name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL)
uu_avl_insert(avl, node, idx);
else {
node = found_node;
deleg_perm = &node->dpn_perm;
}
switch (locality) {
case ZFS_DELEG_LOCAL:
deleg_perm->dp_local = B_TRUE;
break;
case ZFS_DELEG_DESCENDENT:
deleg_perm->dp_descend = B_TRUE;
break;
case ZFS_DELEG_NA:
break;
default:
assert(B_FALSE); /* invalid locality */
}
}
static inline int
parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
uu_avl_t *avl = who_perm->who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_perm->who_type;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
deleg_perm_node_t *node =
safe_malloc(sizeof (deleg_perm_node_t));
VERIFY(type == DATA_TYPE_BOOLEAN);
uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
set_deleg_perm_node(avl, node, who_type, name, locality);
}
return (0);
}
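/*
* Editor's note (not part of the original source): each nvpair name parsed
* below encodes the grantee as "<who_type><locality>$[<who_name>]", the same
* "%c%c$%s" format that store_allow_perm() builds further down.  Assuming the
* usual single-character codes from the ZFS headers ('u' for a user entry,
* 'l' for local scope), a key for uid 1001 would look like "ul$1001".
*/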
static inline int
parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = fsperm->fsp_set;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *name = nvpair_name(nvp);
uu_avl_t *avl = NULL;
uu_avl_pool_t *avl_pool = NULL;
zfs_deleg_who_type_t perm_type = name[0];
char perm_locality = name[1];
const char *perm_name = name + 3;
who_perm_t *who_perm = NULL;
assert('$' == name[2]);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
switch (perm_type) {
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_NAMED_SET:
case ZFS_DELEG_NAMED_SET_SETS:
avl_pool = fspset->fsps_named_set_avl_pool;
avl = fsperm->fsp_sc_avl;
break;
case ZFS_DELEG_USER:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
avl_pool = fspset->fsps_who_perm_avl_pool;
avl = fsperm->fsp_uge_avl;
break;
default:
assert(!"unhandled zfs_deleg_who_type_t");
}
who_perm_node_t *found_node = NULL;
who_perm_node_t *node = safe_malloc(
sizeof (who_perm_node_t));
who_perm = &node->who_perm;
uu_avl_index_t idx = 0;
uu_avl_node_init(node, &node->who_avl_node, avl_pool);
who_perm_init(who_perm, fsperm, perm_type, perm_name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL) {
if (avl == fsperm->fsp_uge_avl) {
uid_t rid = 0;
struct passwd *p = NULL;
struct group *g = NULL;
const char *nice_name = NULL;
switch (perm_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
rid = atoi(perm_name);
p = getpwuid(rid);
if (p)
nice_name = p->pw_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
rid = atoi(perm_name);
g = getgrgid(rid);
if (g)
nice_name = g->gr_name;
break;
default:
break;
}
if (nice_name != NULL) {
(void) strlcpy(
node->who_perm.who_ug_name,
nice_name, 256);
} else {
/* User or group unknown */
(void) snprintf(
node->who_perm.who_ug_name,
sizeof (node->who_perm.who_ug_name),
"(unknown: %d)", rid);
}
}
uu_avl_insert(avl, node, idx);
} else {
node = found_node;
who_perm = &node->who_perm;
}
assert(who_perm != NULL);
(void) parse_who_perm(who_perm, nvl2, perm_locality);
}
return (0);
}
static inline int
parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
uu_avl_index_t idx = 0;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *fsname = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
fs_perm_t *fsperm = NULL;
fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t));
fsperm = &node->fspn_fsperm;
VERIFY(DATA_TYPE_NVLIST == type);
uu_list_node_init(node, &node->fspn_list_node,
fspset->fsps_list_pool);
idx = uu_list_numnodes(fspset->fsps_list);
fs_perm_init(fsperm, fspset, fsname);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
(void) parse_fs_perm(fsperm, nvl2);
uu_list_insert(fspset->fsps_list, node, idx);
}
return (0);
}
static inline const char *
deleg_perm_comment(zfs_deleg_note_t note)
{
const char *str = "";
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
case ZFS_DELEG_NOTE_ALLOW:
str = gettext("Must also have the permission that is being"
"\n\t\t\t\tallowed");
break;
case ZFS_DELEG_NOTE_CLONE:
str = gettext("Must also have the 'create' ability and 'mount'"
"\n\t\t\t\tability in the origin file system");
break;
case ZFS_DELEG_NOTE_CREATE:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DESTROY:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DIFF:
str = gettext("Allows lookup of paths within a dataset;"
"\n\t\t\t\tgiven an object number. Ordinary users need this"
"\n\t\t\t\tin order to use zfs diff");
break;
case ZFS_DELEG_NOTE_HOLD:
str = gettext("Allows adding a user hold to a snapshot");
break;
case ZFS_DELEG_NOTE_MOUNT:
str = gettext("Allows mount/umount of ZFS datasets");
break;
case ZFS_DELEG_NOTE_PROMOTE:
str = gettext("Must also have the 'mount'\n\t\t\t\tand"
" 'promote' ability in the origin file system");
break;
case ZFS_DELEG_NOTE_RECEIVE:
str = gettext("Must also have the 'mount' and 'create'"
" ability");
break;
case ZFS_DELEG_NOTE_RELEASE:
str = gettext("Allows releasing a user hold which\n\t\t\t\t"
"might destroy the snapshot");
break;
case ZFS_DELEG_NOTE_RENAME:
str = gettext("Must also have the 'mount' and 'create'"
"\n\t\t\t\tability in the new parent");
break;
case ZFS_DELEG_NOTE_ROLLBACK:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SEND:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SHARE:
str = gettext("Allows sharing file systems over NFS or SMB"
"\n\t\t\t\tprotocols");
break;
case ZFS_DELEG_NOTE_SNAPSHOT:
str = gettext("");
break;
case ZFS_DELEG_NOTE_LOAD_KEY:
str = gettext("Allows loading or unloading an encryption key");
break;
case ZFS_DELEG_NOTE_CHANGE_KEY:
str = gettext("Allows changing or adding an encryption key");
break;
/*
* case ZFS_DELEG_NOTE_VSCAN:
* str = gettext("");
* break;
*/
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
str = gettext("Allows accessing any groupquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPUSED:
str = gettext("Allows reading any groupused@... property");
break;
case ZFS_DELEG_NOTE_USERPROP:
str = gettext("Allows changing any user property");
break;
case ZFS_DELEG_NOTE_USERQUOTA:
str = gettext("Allows accessing any userquota@... property");
break;
case ZFS_DELEG_NOTE_USERUSED:
str = gettext("Allows reading any userused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJQUOTA:
str = gettext("Allows accessing any userobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"groupobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJUSED:
str = gettext("Allows reading any groupobjused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJUSED:
str = gettext("Allows reading any userobjused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTQUOTA:
str = gettext("Allows accessing any projectquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTUSED:
str = gettext("Allows reading any projectused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjused@... property");
break;
/* other */
default:
str = "";
}
return (str);
}
struct allow_opts {
boolean_t local;
boolean_t descend;
boolean_t user;
boolean_t group;
boolean_t everyone;
boolean_t create;
boolean_t set;
boolean_t recursive; /* unallow only */
boolean_t prt_usage;
boolean_t prt_perms;
char *who;
char *perms;
const char *dataset;
};
static inline int
prop_cmp(const void *a, const void *b)
{
const char *str1 = *(const char **)a;
const char *str2 = *(const char **)b;
return (strcmp(str1, str2));
}
static void
allow_usage(boolean_t un, boolean_t requested, const char *msg)
{
const char *opt_desc[] = {
"-h", gettext("show this help message and exit"),
"-l", gettext("set permission locally"),
"-d", gettext("set permission for descents"),
"-u", gettext("set permission for user"),
"-g", gettext("set permission for group"),
"-e", gettext("set permission for everyone"),
"-c", gettext("set create time permission"),
"-s", gettext("define permission set"),
/* unallow only */
"-r", gettext("remove permissions recursively"),
};
size_t unallow_size = sizeof (opt_desc) / sizeof (char *);
size_t allow_size = unallow_size - 2;
const char *props[ZFS_NUM_PROPS];
int i;
size_t count = 0;
FILE *fp = requested ? stdout : stderr;
zprop_desc_t *pdtbl = zfs_prop_get_table();
const char *fmt = gettext("%-16s %-14s\t%s\n");
(void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW :
HELP_ALLOW));
(void) fprintf(fp, gettext("Options:\n"));
for (i = 0; i < (un ? unallow_size : allow_size); i += 2) {
const char *opt = opt_desc[i];
const char *optdsc = opt_desc[i + 1];
(void) fprintf(fp, gettext(" %-10s %s\n"), opt, optdsc);
}
(void) fprintf(fp, gettext("\nThe following permissions are "
"supported:\n\n"));
(void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"),
gettext("NOTES"));
for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) {
const char *perm_name = zfs_deleg_perm_tbl[i].z_perm;
zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note;
const char *perm_type = deleg_perm_type(perm_note);
const char *perm_comment = deleg_perm_comment(perm_note);
(void) fprintf(fp, fmt, perm_name, perm_type, perm_comment);
}
for (i = 0; i < ZFS_NUM_PROPS; i++) {
zprop_desc_t *pd = &pdtbl[i];
if (pd->pd_visible != B_TRUE)
continue;
if (pd->pd_attr == PROP_READONLY)
continue;
props[count++] = pd->pd_name;
}
props[count] = NULL;
qsort(props, count, sizeof (char *), prop_cmp);
for (i = 0; i < count; i++)
(void) fprintf(fp, fmt, props[i], gettext("property"), "");
if (msg != NULL)
(void) fprintf(fp, gettext("\nzfs: error: %s"), msg);
exit(requested ? 0 : 2);
}
static inline const char *
munge_args(int argc, char **argv, boolean_t un, size_t expected_argc,
char **permsp)
{
if (un && argc == expected_argc - 1)
*permsp = NULL;
else if (argc == expected_argc)
*permsp = argv[argc - 2];
else
allow_usage(un, B_FALSE,
gettext("wrong number of parameters\n"));
return (argv[argc - 1]);
}
static void
parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts)
{
int uge_sum = opts->user + opts->group + opts->everyone;
int csuge_sum = opts->create + opts->set + uge_sum;
int ldcsuge_sum = csuge_sum + opts->local + opts->descend;
int all_sum = un ? ldcsuge_sum + opts->recursive : ldcsuge_sum;
if (uge_sum > 1)
allow_usage(un, B_FALSE,
gettext("-u, -g, and -e are mutually exclusive\n"));
if (opts->prt_usage) {
if (argc == 0 && all_sum == 0)
allow_usage(un, B_TRUE, NULL);
else
usage(B_FALSE);
}
if (opts->set) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -s\n"));
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
if (argv[0][0] != '@')
allow_usage(un, B_FALSE,
gettext("invalid set name: missing '@' prefix\n"));
opts->who = argv[0];
} else if (opts->create) {
if (ldcsuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -c\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (opts->everyone) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -e\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone")
== 0) {
opts->everyone = B_TRUE;
argc--;
argv++;
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (argc == 1 && !un) {
opts->prt_perms = B_TRUE;
opts->dataset = argv[argc-1];
} else {
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
opts->who = argv[0];
}
if (!opts->local && !opts->descend) {
opts->local = B_TRUE;
opts->descend = B_TRUE;
}
}
static void
store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend,
const char *who, char *perms, nvlist_t *top_nvl)
{
int i;
char ld[2] = { '\0', '\0' };
char who_buf[MAXNAMELEN + 32];
char base_type = '\0';
char set_type = '\0';
nvlist_t *base_nvl = NULL;
nvlist_t *set_nvl = NULL;
nvlist_t *nvl;
if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
switch (type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
set_type = ZFS_DELEG_NAMED_SET_SETS;
base_type = ZFS_DELEG_NAMED_SET;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
set_type = ZFS_DELEG_CREATE_SETS;
base_type = ZFS_DELEG_CREATE;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
set_type = ZFS_DELEG_USER_SETS;
base_type = ZFS_DELEG_USER;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
set_type = ZFS_DELEG_GROUP_SETS;
base_type = ZFS_DELEG_GROUP;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
set_type = ZFS_DELEG_EVERYONE_SETS;
base_type = ZFS_DELEG_EVERYONE;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
default:
assert(set_type != '\0' && base_type != '\0');
}
if (perms != NULL) {
char *curr = perms;
char *end = curr + strlen(perms);
while (curr < end) {
char *delim = strchr(curr, ',');
if (delim == NULL)
delim = end;
else
*delim = '\0';
if (curr[0] == '@')
nvl = set_nvl;
else
nvl = base_nvl;
(void) nvlist_add_boolean(nvl, curr);
if (delim != end)
*delim = ',';
curr = delim + 1;
}
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (!nvlist_empty(base_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
base_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
base_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
base_nvl);
}
if (!nvlist_empty(set_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
set_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
set_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
set_nvl);
}
}
} else {
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", base_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", base_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", set_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", set_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
}
}
}
static int
construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
{
if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (opts->set) {
store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local,
opts->descend, opts->who, opts->perms, *nvlp);
} else if (opts->create) {
store_allow_perm(ZFS_DELEG_CREATE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else if (opts->everyone) {
store_allow_perm(ZFS_DELEG_EVERYONE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else {
char *curr = opts->who;
char *end = curr + strlen(curr);
while (curr < end) {
const char *who;
zfs_deleg_who_type_t who_type = ZFS_DELEG_WHO_UNKNOWN;
char *endch;
char *delim = strchr(curr, ',');
char errbuf[256];
char id[64];
struct passwd *p = NULL;
struct group *g = NULL;
uid_t rid;
if (delim == NULL)
delim = end;
else
*delim = '\0';
rid = (uid_t)strtol(curr, &endch, 0);
if (opts->user) {
who_type = ZFS_DELEG_USER;
if (*endch != '\0')
p = getpwnam(curr);
else
p = getpwuid(rid);
if (p != NULL)
rid = p->pw_uid;
else if (*endch != '\0') {
(void) snprintf(errbuf, 256, gettext(
"invalid user %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else if (opts->group) {
who_type = ZFS_DELEG_GROUP;
if (*endch != '\0')
g = getgrnam(curr);
else
g = getgrgid(rid);
if (g != NULL)
rid = g->gr_gid;
else if (*endch != '\0') {
(void) snprintf(errbuf, 256, gettext(
"invalid group %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else {
if (*endch != '\0') {
p = getpwnam(curr);
} else {
p = getpwuid(rid);
}
if (p == NULL) {
if (*endch != '\0') {
g = getgrnam(curr);
} else {
g = getgrgid(rid);
}
}
if (p != NULL) {
who_type = ZFS_DELEG_USER;
rid = p->pw_uid;
} else if (g != NULL) {
who_type = ZFS_DELEG_GROUP;
rid = g->gr_gid;
} else {
(void) snprintf(errbuf, 256, gettext(
"invalid user/group %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
}
(void) sprintf(id, "%u", rid);
who = id;
store_allow_perm(who_type, opts->local,
opts->descend, who, opts->perms, *nvlp);
curr = delim + 1;
}
}
return (0);
}
static void
print_set_creat_perms(uu_avl_t *who_avl)
{
const char *sc_title[] = {
gettext("Permission sets:\n"),
gettext("Create time permissions:\n"),
NULL
};
who_perm_node_t *who_node = NULL;
int prev_weight = -1;
for (who_node = uu_avl_first(who_avl); who_node != NULL;
who_node = uu_avl_next(who_avl, who_node)) {
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
const char *who_name = who_node->who_perm.who_name;
int weight = who_type2weight(who_type);
boolean_t first = B_TRUE;
deleg_perm_node_t *deleg_node;
if (prev_weight != weight) {
(void) printf("%s", sc_title[weight]);
prev_weight = weight;
}
if (who_name == NULL || strnlen(who_name, 1) == 0)
(void) printf("\t");
else
(void) printf("\t%s ", who_name);
for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (first) {
(void) printf("%s",
deleg_node->dpn_perm.dp_name);
first = B_FALSE;
} else
(void) printf(",%s",
deleg_node->dpn_perm.dp_name);
}
(void) printf("\n");
}
}
static void
print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
const char *title)
{
who_perm_node_t *who_node = NULL;
boolean_t prt_title = B_TRUE;
uu_avl_walk_t *walk;
if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((who_node = uu_avl_walk_next(walk)) != NULL) {
const char *who_name = who_node->who_perm.who_name;
const char *nice_who_name = who_node->who_perm.who_ug_name;
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
char delim = ' ';
deleg_perm_node_t *deleg_node;
boolean_t prt_who = B_TRUE;
for (deleg_node = uu_avl_first(avl);
deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (local != deleg_node->dpn_perm.dp_local ||
descend != deleg_node->dpn_perm.dp_descend)
continue;
if (prt_who) {
const char *who = NULL;
if (prt_title) {
prt_title = B_FALSE;
(void) printf("%s", title);
}
switch (who_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
who = gettext("user");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
who = gettext("group");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
who = gettext("everyone");
who_name = NULL;
break;
default:
assert(who != NULL);
}
prt_who = B_FALSE;
if (who_name == NULL)
(void) printf("\t%s", who);
else
(void) printf("\t%s %s", who, who_name);
}
(void) printf("%c%s", delim,
deleg_node->dpn_perm.dp_name);
delim = ',';
}
if (!prt_who)
(void) printf("\n");
}
uu_avl_walk_end(walk);
}
static void
print_fs_perms(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = NULL;
char buf[MAXNAMELEN + 32];
const char *dsname = buf;
for (node = uu_list_first(fspset->fsps_list); node != NULL;
node = uu_list_next(fspset->fsps_list, node)) {
uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
int left = 0;
(void) snprintf(buf, sizeof (buf),
gettext("---- Permissions on %s "),
node->fspn_fsperm.fsp_name);
(void) printf("%s", dsname);
left = 70 - strlen(buf);
while (left-- > 0)
(void) printf("-");
(void) printf("\n");
print_set_creat_perms(sc_avl);
print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE,
gettext("Local permissions:\n"));
print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE,
gettext("Descendent permissions:\n"));
print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE,
gettext("Local+Descendent permissions:\n"));
}
}
static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
struct deleg_perms {
boolean_t un;
nvlist_t *nvl;
};
static int
set_deleg_perms(zfs_handle_t *zhp, void *data)
{
struct deleg_perms *perms = (struct deleg_perms *)data;
zfs_type_t zfs_type = zfs_get_type(zhp);
if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME)
return (0);
return (zfs_set_fsacl(zhp, perms->un, perms->nvl));
}
static int
zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
{
zfs_handle_t *zhp;
nvlist_t *perm_nvl = NULL;
nvlist_t *update_perm_nvl = NULL;
int error = 1;
int c;
struct allow_opts opts = { 0 };
const char *optstr = un ? "ldugecsrh" : "ldugecsh";
/* check opts */
while ((c = getopt(argc, argv, optstr)) != -1) {
switch (c) {
case 'l':
opts.local = B_TRUE;
break;
case 'd':
opts.descend = B_TRUE;
break;
case 'u':
opts.user = B_TRUE;
break;
case 'g':
opts.group = B_TRUE;
break;
case 'e':
opts.everyone = B_TRUE;
break;
case 's':
opts.set = B_TRUE;
break;
case 'c':
opts.create = B_TRUE;
break;
case 'r':
opts.recursive = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'h':
opts.prt_usage = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
parse_allow_args(argc, argv, un, &opts);
/* try to open the dataset */
if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
(void) fprintf(stderr, "Failed to open dataset: %s\n",
opts.dataset);
return (-1);
}
if (zfs_get_fsacl(zhp, &perm_nvl) != 0)
goto cleanup2;
fs_perm_set_init(&fs_perm_set);
if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) {
(void) fprintf(stderr, "Failed to parse fsacl permissions\n");
goto cleanup1;
}
if (opts.prt_perms)
print_fs_perms(&fs_perm_set);
else {
(void) construct_fsacl_list(un, &opts, &update_perm_nvl);
if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0)
goto cleanup0;
if (un && opts.recursive) {
struct deleg_perms data = { un, update_perm_nvl };
if (zfs_iter_filesystems(zhp, set_deleg_perms,
&data) != 0)
goto cleanup0;
}
}
error = 0;
cleanup0:
nvlist_free(perm_nvl);
nvlist_free(update_perm_nvl);
cleanup1:
fs_perm_set_fini(&fs_perm_set);
cleanup2:
zfs_close(zhp);
return (error);
}
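/*
* Front ends for 'zfs allow' and 'zfs unallow'.  Illustrative invocations
* (editor's examples, not taken from this source; names are made up):
*
*	zfs allow -u alice snapshot,mount,send tank/home
*	zfs allow tank/home                (print the delegated permissions)
*	zfs unallow -r -u alice snapshot tank/home
*/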
static int
zfs_do_allow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE));
}
static int
zfs_do_unallow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE));
}
static int
zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding)
{
int errors = 0;
int i;
const char *tag;
boolean_t recursive = B_FALSE;
const char *opts = holding ? "rt" : "r";
int c;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2)
usage(B_FALSE);
tag = argv[0];
--argc;
++argv;
if (holding && tag[0] == '.') {
/* tags starting with '.' are reserved for libzfs */
(void) fprintf(stderr, gettext("tag may not start with '.'\n"));
usage(B_FALSE);
}
for (i = 0; i < argc; ++i) {
zfs_handle_t *zhp;
char parent[ZFS_MAX_DATASET_NAME_LEN];
const char *delim;
char *path = argv[i];
delim = strchr(path, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), path);
++errors;
continue;
}
(void) strncpy(parent, path, delim - path);
parent[delim - path] = '\0';
zhp = zfs_open(g_zfs, parent,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
++errors;
continue;
}
if (holding) {
if (zfs_hold(zhp, delim+1, tag, recursive, -1) != 0)
++errors;
} else {
if (zfs_release(zhp, delim+1, tag, recursive) != 0)
++errors;
}
zfs_close(zhp);
}
return (errors != 0);
}
/*
* zfs hold [-r] [-t] <tag> <snap> ...
*
* -r Recursively hold
*
* Apply a user-hold with the given tag to the list of snapshots.
*/
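/*
* Illustrative invocation (editor's example, not taken from this source):
*
*	zfs hold -r nightly-backup tank/home@monday
*/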
static int
zfs_do_hold(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_TRUE));
}
/*
* zfs release [-r] <tag> <snap> ...
*
* -r Recursively release
*
* Release a user-hold with the given tag from the list of snapshots.
*/
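/*
* Illustrative invocation (editor's example, not taken from this source):
*
*	zfs release -r nightly-backup tank/home@monday
*/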
static int
zfs_do_release(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_FALSE));
}
typedef struct holds_cbdata {
boolean_t cb_recursive;
const char *cb_snapname;
nvlist_t **cb_nvlp;
size_t cb_max_namelen;
size_t cb_max_taglen;
} holds_cbdata_t;
#define STRFTIME_FMT_STR "%a %b %e %H:%M %Y"
#define DATETIME_BUF_LEN (32)
/*
* Print the holds collected by holds_callback(), either as tab-separated
* fields (scripted mode) or as aligned columns under a header.
*/
static void
print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl)
{
int i;
nvpair_t *nvp = NULL;
const char *const hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" };
const char *col;
if (!scripted) {
for (i = 0; i < 3; i++) {
col = gettext(hdr_cols[i]);
if (i < 2)
(void) printf("%-*s ", i ? tagwidth : nwidth,
col);
else
(void) printf("%s\n", col);
}
}
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
char *zname = nvpair_name(nvp);
nvlist_t *nvl2;
nvpair_t *nvp2 = NULL;
(void) nvpair_value_nvlist(nvp, &nvl2);
while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) {
char tsbuf[DATETIME_BUF_LEN];
const char *tagname = nvpair_name(nvp2);
uint64_t val = 0;
time_t time;
struct tm t;
(void) nvpair_value_uint64(nvp2, &val);
time = (time_t)val;
(void) localtime_r(&time, &t);
(void) strftime(tsbuf, DATETIME_BUF_LEN,
gettext(STRFTIME_FMT_STR), &t);
if (scripted) {
(void) printf("%s\t%s\t%s\n", zname,
tagname, tsbuf);
} else {
(void) printf("%-*s %-*s %s\n", nwidth,
zname, tagwidth, tagname, tsbuf);
}
}
}
}
/*
* Callback invoked per dataset/snapshot: collect its holds into the shared
* nvlist and track the widest name and tag for later column formatting.
*/
static int
holds_callback(zfs_handle_t *zhp, void *data)
{
holds_cbdata_t *cbp = data;
nvlist_t *top_nvl = *cbp->cb_nvlp;
nvlist_t *nvl = NULL;
nvpair_t *nvp = NULL;
const char *zname = zfs_get_name(zhp);
size_t znamelen = strlen(zname);
if (cbp->cb_recursive) {
const char *snapname;
char *delim = strchr(zname, '@');
if (delim == NULL)
return (0);
snapname = delim + 1;
if (strcmp(cbp->cb_snapname, snapname))
return (0);
}
if (zfs_get_holds(zhp, &nvl) != 0)
return (-1);
if (znamelen > cbp->cb_max_namelen)
cbp->cb_max_namelen = znamelen;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *tag = nvpair_name(nvp);
size_t taglen = strlen(tag);
if (taglen > cbp->cb_max_taglen)
cbp->cb_max_taglen = taglen;
}
return (nvlist_add_nvlist(top_nvl, zname, nvl));
}
/*
* zfs holds [-rH] <snap> ...
*
* -r Lists holds that are set on the named snapshots recursively.
* -H Scripted mode; elide headers and separate columns by tabs.
*/
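/*
* Illustrative invocation (editor's example, not taken from this source):
*
*	zfs holds -rH tank/home@monday
*/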
static int
zfs_do_holds(int argc, char **argv)
{
int c;
boolean_t errors = B_FALSE;
boolean_t scripted = B_FALSE;
boolean_t recursive = B_FALSE;
int types = ZFS_TYPE_SNAPSHOT;
holds_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "rH")) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (recursive) {
types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
flags |= ZFS_ITER_RECURSE;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1)
usage(B_FALSE);
nvlist_t *nvl = fnvlist_alloc();
for (int i = 0; i < argc; ++i) {
char *snapshot = argv[i];
const char *delim;
const char *snapname;
delim = strchr(snapshot, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), snapshot);
errors = B_TRUE;
continue;
}
snapname = delim + 1;
if (recursive)
snapshot[delim - snapshot] = '\0';
cb.cb_recursive = recursive;
cb.cb_snapname = snapname;
cb.cb_nvlp = &nvl;
/*
* 1. collect holds data, set format options
*/
ret = zfs_for_each(1, argv + i, flags, types, NULL, NULL, limit,
holds_callback, &cb);
if (ret != 0)
errors = B_TRUE;
}
/*
* 2. print holds data
*/
print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl);
if (nvlist_empty(nvl))
(void) fprintf(stderr, gettext("no datasets available\n"));
nvlist_free(nvl);
return (errors);
}
#define CHECK_SPINNER 30
#define SPINNER_TIME 3 /* seconds */
#define MOUNT_TIME 1 /* seconds */
typedef struct get_all_state {
boolean_t ga_verbose;
get_all_cb_t *ga_cbp;
} get_all_state_t;
static int
get_one_dataset(zfs_handle_t *zhp, void *data)
{
static const char *const spin[] = { "-", "\\", "|", "/" };
static int spinval = 0;
static int spincheck = 0;
static time_t last_spin_time = (time_t)0;
get_all_state_t *state = data;
zfs_type_t type = zfs_get_type(zhp);
if (state->ga_verbose) {
if (--spincheck < 0) {
time_t now = time(NULL);
if (last_spin_time + SPINNER_TIME < now) {
update_progress(spin[spinval++ % 4]);
last_spin_time = now;
}
spincheck = CHECK_SPINNER;
}
}
/*
* Iterate over any nested datasets.
*/
if (zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) {
zfs_close(zhp);
return (1);
}
/*
* Skip any datasets whose type does not match.
*/
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(state->ga_cbp, zhp);
assert(state->ga_cbp->cb_used <= state->ga_cbp->cb_alloc);
return (0);
}
static void
get_all_datasets(get_all_cb_t *cbp, boolean_t verbose)
{
get_all_state_t state = {
.ga_verbose = verbose,
.ga_cbp = cbp
};
if (verbose)
set_progress_header(gettext("Reading ZFS config"));
(void) zfs_iter_root(g_zfs, get_one_dataset, &state);
if (verbose)
finish_progress(gettext("done."));
}
/*
* Generic callback for sharing or mounting filesystems. Because the code is so
* similar, we have a common function with an extra parameter to determine which
* mode we are using.
*/
typedef enum { OP_SHARE, OP_MOUNT } share_mount_op_t;
typedef struct share_mount_state {
share_mount_op_t sm_op;
boolean_t sm_verbose;
int sm_flags;
char *sm_options;
enum sa_protocol sm_proto; /* only valid for OP_SHARE */
pthread_mutex_t sm_lock; /* protects the remaining fields */
uint_t sm_total; /* number of filesystems to process */
uint_t sm_done; /* number of filesystems processed */
int sm_status; /* -1 if any of the share/mount operations failed */
} share_mount_state_t;
/*
* Share or mount a dataset.
*/
static int
share_mount_one(zfs_handle_t *zhp, int op, int flags, enum sa_protocol protocol,
boolean_t explicit, const char *options)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char smbshareopts[ZFS_MAXPROPLEN];
const char *cmdname = op == OP_SHARE ? "share" : "mount";
struct mnttab mnt;
uint64_t zoned, canmount;
boolean_t shared_nfs, shared_smb;
assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM);
/*
* Check to make sure we can mount/share this dataset. If we
* are in the global zone and the filesystem is exported to a
* local zone, or if we are in a local zone and the
* filesystem is not exported, then it is an error.
*/
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned && getzoneid() == GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"dataset is exported to a local zone\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (!zoned && getzoneid() != GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"permission denied\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* Ignore any filesystems which don't apply to us. This
* includes those with a legacy mountpoint, or those with
* legacy share options.
*/
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts,
sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0);
if (op == OP_SHARE && strcmp(shareopts, "off") == 0 &&
strcmp(smbshareopts, "off") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share '%s': "
"legacy share\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use exports(5) or "
"smb.conf(5) to share this filesystem, or set "
"the sharenfs or sharesmb property\n"));
return (1);
}
/*
* We cannot share or mount legacy filesystems. If the
* shareopts is non-legacy but the mountpoint is legacy, we
* treat it as a legacy share.
*/
if (strcmp(mountpoint, "legacy") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use %s(8) to "
"%s this filesystem\n"), cmdname, cmdname);
return (1);
}
if (strcmp(mountpoint, "none") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': no "
"mountpoint set\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* canmount explicit outcome
* on no pass through
* on yes pass through
* off no return 0
* off yes display error, return 1
* noauto no return 0
* noauto yes pass through
*/
canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
if (canmount == ZFS_CANMOUNT_OFF) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"'canmount' property is set to 'off'\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) {
/*
* When performing a 'zfs mount -a', we skip any mounts for
* datasets that have 'noauto' set. Sharing a dataset with
* 'noauto' set is only allowed if it's mounted.
*/
if (op == OP_MOUNT)
return (0);
if (op == OP_SHARE && !zfs_is_mounted(zhp, NULL)) {
/* also purge it from existing exports */
zfs_unshare(zhp, mountpoint, NULL);
return (0);
}
}
/*
* If this filesystem is encrypted and does not have
* a loaded key, we can not mount it.
*/
if ((flags & MS_CRYPT) == 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF &&
zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"encryption key not loaded\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we can not mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
"\"zfs receive -s\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Dataset is not complete, was created by receiving "
"a redacted zfs send stream.\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* At this point, we have verified that the mountpoint and/or
* shareopts are appropriate for auto management. If the
* filesystem is already mounted or shared, return (failing
* for explicit requests); otherwise mount or share the
* filesystem.
*/
switch (op) {
case OP_SHARE: {
enum sa_protocol prot[] = {SA_PROTOCOL_NFS, SA_NO_PROTOCOL};
shared_nfs = zfs_is_shared(zhp, NULL, prot);
*prot = SA_PROTOCOL_SMB;
shared_smb = zfs_is_shared(zhp, NULL, prot);
if ((shared_nfs && shared_smb) ||
(shared_nfs && strcmp(shareopts, "on") == 0 &&
strcmp(smbshareopts, "off") == 0) ||
(shared_smb && strcmp(smbshareopts, "on") == 0 &&
strcmp(shareopts, "off") == 0)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share "
"'%s': filesystem already shared\n"),
zfs_get_name(zhp));
return (1);
}
if (!zfs_is_mounted(zhp, NULL) &&
zfs_mount(zhp, NULL, flags) != 0)
return (1);
*prot = protocol;
if (zfs_share(zhp, protocol == SA_NO_PROTOCOL ? NULL : prot))
return (1);
}
break;
case OP_MOUNT:
mnt.mnt_mntopts = (char *)(options ?: "");
if (!hasmntopt(&mnt, MNTOPT_REMOUNT) &&
zfs_is_mounted(zhp, NULL)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot mount "
"'%s': filesystem already mounted\n"),
zfs_get_name(zhp));
return (1);
}
if (zfs_mount(zhp, options, flags) != 0)
return (1);
break;
}
return (0);
}
/*
* Reports progress in the form "(current/total)". Not thread-safe.
*/
static void
report_mount_progress(int current, int total)
{
static time_t last_progress_time = 0;
time_t now = time(NULL);
char info[32];
/* display header if we're here for the first time */
if (current == 1) {
set_progress_header(gettext("Mounting ZFS filesystems"));
} else if (current != total && last_progress_time + MOUNT_TIME >= now) {
/* too soon to report again */
return;
}
last_progress_time = now;
(void) sprintf(info, "(%d/%d)", current, total);
if (current == total)
finish_progress(info);
else
update_progress(info);
}
/*
* zfs_foreach_mountpoint() callback that mounts or shares one filesystem and
* updates the progress meter.
*/
static int
share_mount_one_cb(zfs_handle_t *zhp, void *arg)
{
share_mount_state_t *sms = arg;
int ret;
ret = share_mount_one(zhp, sms->sm_op, sms->sm_flags, sms->sm_proto,
B_FALSE, sms->sm_options);
pthread_mutex_lock(&sms->sm_lock);
if (ret != 0)
sms->sm_status = ret;
sms->sm_done++;
if (sms->sm_verbose)
report_mount_progress(sms->sm_done, sms->sm_total);
pthread_mutex_unlock(&sms->sm_lock);
return (ret);
}
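/*
* Append newopts to mntopts, separated by a comma; the combined length is
* limited to MNT_LINE_MAX, and anything longer is reported as a usage error.
*/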
static void
append_options(char *mntopts, char *newopts)
{
int len = strlen(mntopts);
/* original length plus new string to append plus 1 for the comma */
if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) {
(void) fprintf(stderr, gettext("the opts argument for "
"'%s' option is too long (more than %d chars)\n"),
"-o", MNT_LINE_MAX);
usage(B_FALSE);
}
if (*mntopts)
mntopts[len++] = ',';
(void) strcpy(&mntopts[len], newopts);
}
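/*
* Translate a share protocol name into its enum sa_protocol value; an
* unrecognized name lists the valid protocols and exits via usage().
*/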
static enum sa_protocol
sa_protocol_decode(const char *protocol)
{
for (enum sa_protocol i = 0; i < ARRAY_SIZE(sa_protocol_names); ++i)
if (strcmp(protocol, sa_protocol_names[i]) == 0)
return (i);
(void) fputs(gettext("share type must be one of: "), stderr);
for (enum sa_protocol i = 0;
i < ARRAY_SIZE(sa_protocol_names); ++i)
(void) fprintf(stderr, "%s%s",
i != 0 ? ", " : "", sa_protocol_names[i]);
(void) fputc('\n', stderr);
usage(B_FALSE);
}
static int
share_mount(int op, int argc, char **argv)
{
int do_all = 0;
boolean_t verbose = B_FALSE;
int c, ret = 0;
char *options = NULL;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":alvo:Of" : "al"))
!= -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'v':
verbose = B_TRUE;
break;
case 'l':
flags |= MS_CRYPT;
break;
case 'o':
if (*optarg == '\0') {
(void) fprintf(stderr, gettext("empty mount "
"options (-o) specified\n"));
usage(B_FALSE);
}
if (options == NULL)
options = safe_malloc(MNT_LINE_MAX + 1);
/* option validation is done later */
append_options(options, optarg);
break;
case 'O':
flags |= MS_OVERLAY;
break;
case 'f':
flags |= MS_FORCE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (do_all) {
enum sa_protocol protocol = SA_NO_PROTOCOL;
if (op == OP_SHARE && argc > 0) {
protocol = sa_protocol_decode(argv[0]);
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
start_progress_timer();
get_all_cb_t cb = { 0 };
get_all_datasets(&cb, verbose);
if (cb.cb_used == 0) {
free(options);
return (0);
}
share_mount_state_t share_mount_state = { 0 };
share_mount_state.sm_op = op;
share_mount_state.sm_verbose = verbose;
share_mount_state.sm_flags = flags;
share_mount_state.sm_options = options;
share_mount_state.sm_proto = protocol;
share_mount_state.sm_total = cb.cb_used;
pthread_mutex_init(&share_mount_state.sm_lock, NULL);
+ /* For a 'zfs share -a' operation start with a clean slate. */
+ zfs_truncate_shares(NULL);
+
/*
* libshare isn't mt-safe, so only do the operation in parallel
* if we're mounting. Additionally, the key-loading option must
* be serialized so that we can prompt the user for their keys
* in a consistent manner.
*/
zfs_foreach_mountpoint(g_zfs, cb.cb_handles, cb.cb_used,
share_mount_one_cb, &share_mount_state,
op == OP_MOUNT && !(flags & MS_CRYPT));
zfs_commit_shares(NULL);
ret = share_mount_state.sm_status;
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
} else if (argc == 0) {
FILE *mnttab;
struct mnttab entry;
if ((op == OP_SHARE) || (options != NULL)) {
(void) fprintf(stderr, gettext("missing filesystem "
"argument (specify -a for all)\n"));
usage(B_FALSE);
}
/*
* When mount is given no arguments, go through
* /proc/self/mounts and display any active ZFS mounts.
* We hide any snapshots, since they are controlled
* automatically.
*/
if ((mnttab = fopen(MNTTAB, "re")) == NULL) {
free(options);
return (ENOENT);
}
while (getmntent(mnttab, &entry) == 0) {
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
strchr(entry.mnt_special, '@') != NULL)
continue;
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
}
(void) fclose(mnttab);
} else {
zfs_handle_t *zhp;
if (argc > 1) {
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
} else {
ret = share_mount_one(zhp, op, flags, SA_NO_PROTOCOL,
B_TRUE, options);
zfs_commit_shares(NULL);
zfs_close(zhp);
}
}
free(options);
return (ret);
}
/*
* zfs mount -a
* zfs mount filesystem
*
* Mount all filesystems, or mount the given filesystem.
*/
static int
zfs_do_mount(int argc, char **argv)
{
return (share_mount(OP_MOUNT, argc, argv));
}
/*
* zfs share -a [nfs | smb]
* zfs share filesystem
*
* Share all filesystems, or share the given filesystem.
*/
static int
zfs_do_share(int argc, char **argv)
{
return (share_mount(OP_SHARE, argc, argv));
}
typedef struct unshare_unmount_node {
zfs_handle_t *un_zhp;
char *un_mountp;
uu_avl_node_t un_avlnode;
} unshare_unmount_node_t;
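/*
* AVL comparator: order nodes by mountpoint so that a reverse walk
* visits child filesystems before their parents.
*/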
static int
unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
const unshare_unmount_node_t *l = larg;
const unshare_unmount_node_t *r = rarg;
return (strcmp(l->un_mountp, r->un_mountp));
}
/*
* Convenience routine used by unshare_unmount(). Given an absolute path,
* find its entry in /proc/self/mounts, verify that it's a ZFS filesystem,
* and unshare or unmount it appropriately.
*/
static int
unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
{
zfs_handle_t *zhp;
int ret = 0;
struct stat64 statbuf;
struct extmnttab entry;
const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
ino_t path_inode;
/*
* Search for the given (major,minor) pair in the mount table.
*/
if (getextmntent(path, &entry, &statbuf) != 0) {
if (op == OP_SHARE) {
(void) fprintf(stderr, gettext("cannot %s '%s': not "
"currently mounted\n"), cmdname, path);
return (1);
}
(void) fprintf(stderr, gettext("warning: %s not in "
"/proc/self/mounts\n"), path);
if ((ret = umount2(path, flags)) != 0)
(void) fprintf(stderr, gettext("%s: %s\n"), path,
strerror(errno));
return (ret != 0);
}
path_inode = statbuf.st_ino;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS "
"filesystem\n"), cmdname, path);
return (1);
}
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
ret = 1;
if (stat64(entry.mnt_mountp, &statbuf) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
cmdname, path, strerror(errno));
goto out;
} else if (statbuf.st_ino != path_inode) {
(void) fprintf(stderr, gettext("cannot "
"%s '%s': not a mountpoint\n"), cmdname, path);
goto out;
}
if (op == OP_SHARE) {
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char smbshare_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop,
sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop,
sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(smbshare_prop, "off") == 0) {
(void) fprintf(stderr, gettext("cannot unshare "
"'%s': legacy share\n"), path);
(void) fprintf(stderr, gettext("use exportfs(8) "
"or smbcontrol(1) to unshare this filesystem\n"));
} else if (!zfs_is_shared(zhp, NULL, NULL)) {
(void) fprintf(stderr, gettext("cannot unshare '%s': "
"not currently shared\n"), path);
} else {
ret = zfs_unshare(zhp, path, NULL);
zfs_commit_shares(NULL);
}
} else {
char mtpt_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop,
sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0);
if (is_manual) {
ret = zfs_unmount(zhp, NULL, flags);
} else if (strcmp(mtpt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot unmount "
"'%s': legacy mountpoint\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use umount(8) "
"to unmount this filesystem\n"));
} else {
ret = zfs_unmountall(zhp, flags);
}
}
out:
zfs_close(zhp);
return (ret != 0);
}
/*
* Generic callback for unsharing or unmounting a filesystem.
*/
static int
unshare_unmount(int op, int argc, char **argv)
{
int do_all = 0;
int flags = 0;
int ret = 0;
int c;
zfs_handle_t *zhp;
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char sharesmb[ZFS_MAXPROPLEN];
/* check options */
while ((c = getopt(argc, argv, op == OP_SHARE ? ":a" : "afu")) != -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'f':
flags |= MS_FORCE;
break;
case 'u':
flags |= MS_CRYPT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (do_all) {
/*
* We could make use of zfs_for_each() to walk all datasets in
* the system, but this would be very inefficient, especially
* since we would have to linearly search /proc/self/mounts for
* each one. Instead, do one pass through /proc/self/mounts
* looking for zfs entries and call zfs_unmount() for each one.
*
* Things get a little tricky if the administrator has created
* mountpoints beneath other ZFS filesystems. In this case, we
* have to unmount the deepest filesystems first. To accomplish
* this, we place all the mountpoints in an AVL tree sorted by
* mountpoint, and walk the result in reverse so that child
* filesystems are unmounted before their parents.
*/
FILE *mnttab;
struct mnttab entry;
uu_avl_pool_t *pool;
uu_avl_t *tree = NULL;
unshare_unmount_node_t *node;
uu_avl_index_t idx;
uu_avl_walk_t *walk;
enum sa_protocol *protocol = NULL,
single_protocol[] = {SA_NO_PROTOCOL, SA_NO_PROTOCOL};
if (op == OP_SHARE && argc > 0) {
*single_protocol = sa_protocol_decode(argv[0]);
protocol = single_protocol;
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (((pool = uu_avl_pool_create("unmount_pool",
sizeof (unshare_unmount_node_t),
offsetof(unshare_unmount_node_t, un_avlnode),
unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
nomem();
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
while (getmntent(mnttab, &entry) == 0) {
/* ignore non-ZFS entries */
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/* ignore snapshots */
if (strchr(entry.mnt_special, '@') != NULL)
continue;
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
continue;
}
/*
* Ignore datasets that are excluded/restricted by
* parent pool name.
*/
if (zpool_skip_pool(zfs_get_pool_name(zhp))) {
zfs_close(zhp);
continue;
}
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") != 0)
break;
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0)
continue;
break;
case OP_MOUNT:
/* Ignore legacy mounts */
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "legacy") == 0)
continue;
/* Ignore canmount=noauto mounts */
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) ==
ZFS_CANMOUNT_NOAUTO)
continue;
break;
default:
break;
}
node = safe_malloc(sizeof (unshare_unmount_node_t));
node->un_zhp = zhp;
node->un_mountp = safe_strdup(entry.mnt_mountp);
uu_avl_node_init(node, &node->un_avlnode, pool);
if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
uu_avl_insert(tree, node, idx);
} else {
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
}
(void) fclose(mnttab);
/*
* Walk the AVL tree in reverse, unmounting each filesystem and
* removing it from the AVL tree in the process.
*/
if ((walk = uu_avl_walk_start(tree,
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
const char *mntarg = NULL;
uu_avl_remove(tree, node);
switch (op) {
case OP_SHARE:
if (zfs_unshare(node->un_zhp,
node->un_mountp, protocol) != 0)
ret = 1;
break;
case OP_MOUNT:
if (zfs_unmount(node->un_zhp,
mntarg, flags) != 0)
ret = 1;
break;
}
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
if (op == OP_SHARE)
zfs_commit_shares(protocol);
uu_avl_walk_end(walk);
uu_avl_destroy(tree);
uu_avl_pool_destroy(pool);
} else {
if (argc != 1) {
if (argc == 0)
(void) fprintf(stderr,
gettext("missing filesystem argument\n"));
else
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* We have an argument, but it may be a full path or a ZFS
* filesystem. Pass full paths off to unshare_unmount_path(),
* otherwise open the filesystem and unshare or unmount it
* directly.
*/
if (argv[0][0] == '/')
return (unshare_unmount_path(op, argv[0],
flags, B_FALSE));
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
verify(zfs_prop_get(zhp, op == OP_SHARE ?
ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL,
NULL, 0, B_FALSE) == 0);
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
sharesmb, sizeof (sharesmb), NULL, NULL,
0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(sharesmb, "off") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': legacy share\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"exports(5) or smb.conf(5) to unshare "
"this filesystem\n"));
ret = 1;
} else if (!zfs_is_shared(zhp, NULL, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': not currently "
"shared\n"), zfs_get_name(zhp));
ret = 1;
} else if (zfs_unshareall(zhp, NULL) != 0) {
ret = 1;
}
break;
case OP_MOUNT:
if (strcmp(nfs_mnt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': legacy "
"mountpoint\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"umount(8) to unmount this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_mounted(zhp, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': not currently "
"mounted\n"),
zfs_get_name(zhp));
ret = 1;
} else if (zfs_unmountall(zhp, flags) != 0) {
ret = 1;
}
break;
}
zfs_close(zhp);
}
return (ret);
}
/*
* zfs unmount [-fu] -a
* zfs unmount [-fu] filesystem
*
* Unmount all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unmount(int argc, char **argv)
{
return (unshare_unmount(OP_MOUNT, argc, argv));
}
/*
* zfs unshare -a
* zfs unshare filesystem
*
* Unshare all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unshare(int argc, char **argv)
{
return (unshare_unmount(OP_SHARE, argc, argv));
}
static int
find_command_idx(const char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
static int
zfs_do_diff(int argc, char **argv)
{
zfs_handle_t *zhp;
int flags = 0;
char *tosnap = NULL;
char *fromsnap = NULL;
char *atp, *copy;
int err = 0;
int c;
struct sigaction sa;
while ((c = getopt(argc, argv, "FHth")) != -1) {
switch (c) {
case 'F':
flags |= ZFS_DIFF_CLASSIFY;
break;
case 'H':
flags |= ZFS_DIFF_PARSEABLE;
break;
case 't':
flags |= ZFS_DIFF_TIMESTAMP;
break;
case 'h':
flags |= ZFS_DIFF_NO_MANGLE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr,
gettext("must provide at least one snapshot name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
fromsnap = argv[0];
tosnap = (argc == 2) ? argv[1] : NULL;
copy = NULL;
if (*fromsnap != '@')
copy = strdup(fromsnap);
else if (tosnap)
copy = strdup(tosnap);
if (copy == NULL)
usage(B_FALSE);
if ((atp = strchr(copy, '@')) != NULL)
*atp = '\0';
if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) {
free(copy);
return (1);
}
free(copy);
/*
* Ignore SIGPIPE so that the library can give us
* information on any failure
*/
if (sigemptyset(&sa.sa_mask) == -1) {
err = errno;
goto out;
}
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGPIPE, &sa, NULL) == -1) {
err = errno;
goto out;
}
err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags);
out:
zfs_close(zhp);
return (err != 0);
}
/*
* zfs bookmark <fs@source>|<fs#source> <fs#bookmark>
*
* Creates a bookmark with the given name from the source snapshot
* or creates a copy of an existing source bookmark.
*/
static int
zfs_do_bookmark(int argc, char **argv)
{
char *source, *bookname;
char expbuf[ZFS_MAX_DATASET_NAME_LEN];
int source_type;
nvlist_t *nvl;
int ret = 0;
int c;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing bookmark argument\n"));
goto usage;
}
source = argv[0];
bookname = argv[1];
if (strchr(source, '@') == NULL && strchr(source, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid source name '%s': "
"must contain a '@' or '#'\n"), source);
goto usage;
}
if (strchr(bookname, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid bookmark name '%s': "
"must contain a '#'\n"), bookname);
goto usage;
}
/*
* expand source or bookname to full path:
* one of them may be specified as short name
*/
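/*
* e.g. "zfs bookmark tank/fs@snap #newbm" expands "#newbm" to
* "tank/fs#newbm".
*/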
{
char **expand;
char *source_short, *bookname_short;
source_short = strpbrk(source, "@#");
bookname_short = strpbrk(bookname, "#");
if (source_short == source &&
bookname_short == bookname) {
(void) fprintf(stderr, gettext(
"either source or bookmark must be specified as "
"a full dataset path\n"));
goto usage;
} else if (source_short != source &&
bookname_short != bookname) {
expand = NULL;
} else if (source_short != source) {
strlcpy(expbuf, source, sizeof (expbuf));
expand = &bookname;
} else if (bookname_short != bookname) {
strlcpy(expbuf, bookname, sizeof (expbuf));
expand = &source;
} else {
abort();
}
if (expand != NULL) {
*strpbrk(expbuf, "@#") = '\0'; /* dataset name in buf */
(void) strlcat(expbuf, *expand, sizeof (expbuf));
*expand = expbuf;
}
}
/* determine source type */
switch (*strpbrk(source, "@#")) {
case '@': source_type = ZFS_TYPE_SNAPSHOT; break;
case '#': source_type = ZFS_TYPE_BOOKMARK; break;
default: abort();
}
/* test the source exists */
zfs_handle_t *zhp;
zhp = zfs_open(g_zfs, source, source_type);
if (zhp == NULL)
goto usage;
zfs_close(zhp);
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, bookname, source);
ret = lzc_bookmark(nvl, NULL);
fnvlist_free(nvl);
if (ret != 0) {
const char *err_msg = NULL;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create bookmark '%s'"), bookname);
switch (ret) {
case EXDEV:
err_msg = "bookmark is in a different pool";
break;
case ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR:
err_msg = "source is not an ancestor of the "
"new bookmark's dataset";
break;
case EEXIST:
err_msg = "bookmark exists";
break;
case EINVAL:
err_msg = "invalid argument";
break;
case ENOTSUP:
err_msg = "bookmark feature not enabled";
break;
case ENOSPC:
err_msg = "out of space";
break;
case ENOENT:
err_msg = "dataset does not exist";
break;
default:
(void) zfs_standard_error(g_zfs, ret, errbuf);
break;
}
if (err_msg != NULL) {
(void) fprintf(stderr, "%s: %s\n", errbuf,
dgettext(TEXT_DOMAIN, err_msg));
}
}
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
static int
zfs_do_channel_program(int argc, char **argv)
{
int ret, fd, c;
size_t progsize, progread;
nvlist_t *outnvl = NULL;
uint64_t instrlimit = ZCP_DEFAULT_INSTRLIMIT;
uint64_t memlimit = ZCP_DEFAULT_MEMLIMIT;
boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
zpool_handle_t *zhp;
/* check options */
while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
switch (c) {
case 't':
case 'm': {
uint64_t arg;
char *endp;
errno = 0;
arg = strtoull(optarg, &endp, 0);
if (errno != 0 || *endp != '\0') {
(void) fprintf(stderr, gettext(
"invalid argument "
"'%s': expected integer\n"), optarg);
goto usage;
}
if (c == 't') {
instrlimit = arg;
} else {
ASSERT3U(c, ==, 'm');
memlimit = arg;
}
break;
}
case 'n': {
sync_flag = B_FALSE;
break;
}
case 'j': {
json_output = B_TRUE;
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr,
gettext("invalid number of arguments\n"));
goto usage;
}
const char *poolname = argv[0];
const char *filename = argv[1];
if (strcmp(filename, "-") == 0) {
fd = 0;
filename = "standard input";
} else if ((fd = open(filename, O_RDONLY)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
filename, strerror(errno));
return (1);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("cannot open pool '%s'\n"),
poolname);
if (fd != 0)
(void) close(fd);
return (1);
}
zpool_close(zhp);
/*
* Read in the channel program, expanding the program buffer as
* necessary.
*/
progread = 0;
progsize = 1024;
char *progbuf = safe_malloc(progsize);
do {
ret = read(fd, progbuf + progread, progsize - progread);
progread += ret;
if (progread == progsize && ret > 0) {
progsize *= 2;
progbuf = safe_realloc(progbuf, progsize);
}
} while (ret > 0);
if (fd != 0)
(void) close(fd);
if (ret < 0) {
free(progbuf);
(void) fprintf(stderr,
gettext("cannot read '%s': %s\n"),
filename, strerror(errno));
return (1);
}
progbuf[progread] = '\0';
/*
* Any remaining arguments are passed as arguments to the lua script as
* a string array:
* {
* "argv" -> [ "arg 1", ... "arg n" ],
* }
*/
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string_array(argnvl, ZCP_ARG_CLIARGV,
(const char **)argv + 2, argc - 2);
if (sync_flag) {
ret = lzc_channel_program(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
} else {
ret = lzc_channel_program_nosync(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
}
if (ret != 0) {
/*
* On error, report the error message handed back by lua if one
* exists. Otherwise, generate an appropriate error message,
* falling back on strerror() for an unexpected return code.
*/
const char *errstring = NULL;
const char *msg = gettext("Channel program execution failed");
uint64_t instructions = 0;
if (outnvl != NULL && nvlist_exists(outnvl, ZCP_RET_ERROR)) {
char *es = NULL;
(void) nvlist_lookup_string(outnvl,
ZCP_RET_ERROR, &es);
if (es == NULL)
errstring = strerror(ret);
else
errstring = es;
if (ret == ETIME) {
(void) nvlist_lookup_uint64(outnvl,
ZCP_ARG_INSTRLIMIT, &instructions);
}
} else {
switch (ret) {
case EINVAL:
errstring =
"Invalid instruction or memory limit.";
break;
case ENOMEM:
errstring = "Return value too large.";
break;
case ENOSPC:
errstring = "Memory limit exhausted.";
break;
case ETIME:
errstring = "Timed out.";
break;
case EPERM:
errstring = "Permission denied. Channel "
"programs must be run as root.";
break;
default:
(void) zfs_standard_error(g_zfs, ret, msg);
}
}
if (errstring != NULL)
(void) fprintf(stderr, "%s:\n%s\n", msg, errstring);
if (ret == ETIME && instructions != 0)
(void) fprintf(stderr,
gettext("%llu Lua instructions\n"),
(u_longlong_t)instructions);
} else {
if (json_output) {
(void) nvlist_print_json(stdout, outnvl);
} else if (nvlist_empty(outnvl)) {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and did not produce output.\n"));
} else {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and produced output:\n"));
dump_nvlist(outnvl, 4);
}
}
free(progbuf);
fnvlist_free(outnvl);
fnvlist_free(argnvl);
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
typedef struct loadkey_cbdata {
boolean_t cb_loadkey;
boolean_t cb_recursive;
boolean_t cb_noop;
char *cb_keylocation;
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
static int
load_key_callback(zfs_handle_t *zhp, void *data)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = data;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If we are working recursively, we want to skip loading / unloading
* keys for non-encryption roots and datasets whose keys are already
* in the desired end-state.
*/
if (cb->cb_recursive) {
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0)
return (ret);
if (!is_encroot)
return (0);
if ((cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_AVAILABLE) ||
(!cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_UNAVAILABLE))
return (0);
}
cb->cb_numattempted++;
if (cb->cb_loadkey)
ret = zfs_crypto_load_key(zhp, cb->cb_noop, cb->cb_keylocation);
else
ret = zfs_crypto_unload_key(zhp);
if (ret != 0) {
cb->cb_numfailed++;
return (ret);
}
return (0);
}
static int
load_unload_keys(int argc, char **argv, boolean_t loadkey)
{
int c, ret = 0, flags = 0;
boolean_t do_all = B_FALSE;
loadkey_cbdata_t cb = { 0 };
cb.cb_loadkey = loadkey;
while ((c = getopt(argc, argv, "anrL:")) != -1) {
/* noop and alternate keylocations only apply to zfs load-key */
if (loadkey) {
switch (c) {
case 'n':
cb.cb_noop = B_TRUE;
continue;
case 'L':
cb.cb_keylocation = optarg;
continue;
default:
break;
}
}
switch (c) {
case 'a':
do_all = B_TRUE;
cb.cb_recursive = B_TRUE;
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
cb.cb_recursive = B_TRUE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!do_all && argc == 0) {
(void) fprintf(stderr,
gettext("Missing dataset argument or -a option\n"));
usage(B_FALSE);
}
if (do_all && argc != 0) {
(void) fprintf(stderr,
gettext("Cannot specify dataset with -a option\n"));
usage(B_FALSE);
}
if (cb.cb_recursive && cb.cb_keylocation != NULL &&
strcmp(cb.cb_keylocation, "prompt") != 0) {
(void) fprintf(stderr, gettext("alternate keylocation may only "
"be 'prompt' with -r or -a\n"));
usage(B_FALSE);
}
ret = zfs_for_each(argc, argv, flags,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, NULL, NULL, 0,
load_key_callback, &cb);
if (cb.cb_noop || (cb.cb_recursive && cb.cb_numattempted != 0)) {
(void) printf(gettext("%llu / %llu key(s) successfully %s\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted,
loadkey ? (cb.cb_noop ? "verified" : "loaded") :
"unloaded");
}
if (cb.cb_numfailed != 0)
ret = -1;
return (ret);
}
static int
zfs_do_load_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_TRUE));
}
static int
zfs_do_unload_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_FALSE));
}
static int
zfs_do_change_key(int argc, char **argv)
{
int c, ret;
uint64_t keystatus;
boolean_t loadkey = B_FALSE, inheritkey = B_FALSE;
zfs_handle_t *zhp = NULL;
nvlist_t *props = fnvlist_alloc();
while ((c = getopt(argc, argv, "lio:")) != -1) {
switch (c) {
case 'l':
loadkey = B_TRUE;
break;
case 'i':
inheritkey = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
if (inheritkey && !nvlist_empty(props)) {
(void) fprintf(stderr,
gettext("Properties not allowed for inheriting\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("Too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[argc - 1],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (loadkey) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus != ZFS_KEYSTATUS_AVAILABLE) {
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
}
/* refresh the properties so the new keystatus is visible */
zfs_refresh_properties(zhp);
}
ret = zfs_crypto_rewrap(zhp, props, inheritkey);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
nvlist_free(props);
zfs_close(zhp);
return (0);
}
/*
* 1) zfs project [-d|-r] <file|directory ...>
* List project ID and inherit flag of file(s) or directories.
* -d: List the directory itself, not its children.
* -r: List subdirectories recursively.
*
* 2) zfs project -C [-k] [-r] <file|directory ...>
* Clear project inherit flag and/or ID on the file(s) or directories.
* -k: Keep the project ID unchanged. If not specified, the project ID
* will be reset to zero.
* -r: Clear on subdirectories recursively.
*
* 3) zfs project -c [-0] [-d|-r] [-p id] <file|directory ...>
* Check project ID and inherit flag on the file(s) or directories,
* report the outliers.
* -0: Print file name followed by a NUL instead of newline.
* -d: Check the directory itself, not its children.
* -p: Specify the referenced ID for comparing with the target file(s)
* or directories' project IDs. If not specified, the target (top)
* directory's project ID will be used as the referenced one.
* -r: Check subdirectories recursively.
*
* 4) zfs project [-p id] [-r] [-s] <file|directory ...>
* Set project ID and/or inherit flag on the file(s) or directories.
* -p: Set the project ID as the given id.
* -r: Set on subdirectories recursively. If the "-p" option is not
* specified, the top-level directory's project ID is used as the
* given ID, and both the project ID and the inherit flag are set
* on all descendants of the top-level directory.
* -s: Set project inherit flag.
*/
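/*
* Example: "zfs project -s -p 1001 -r /tank/dir" sets project ID 1001 and
* the project inherit flag on /tank/dir and all of its descendants.
*/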
static int
zfs_do_project(int argc, char **argv)
{
zfs_project_control_t zpc = {
.zpc_expected_projid = ZFS_INVALID_PROJID,
.zpc_op = ZFS_PROJECT_OP_DEFAULT,
.zpc_dironly = B_FALSE,
.zpc_keep_projid = B_FALSE,
.zpc_newline = B_TRUE,
.zpc_recursive = B_FALSE,
.zpc_set_flag = B_FALSE,
};
int ret = 0, c;
if (argc < 2)
usage(B_FALSE);
while ((c = getopt(argc, argv, "0Ccdkp:rs")) != -1) {
switch (c) {
case '0':
zpc.zpc_newline = B_FALSE;
break;
case 'C':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CLEAR;
break;
case 'c':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CHECK;
break;
case 'd':
zpc.zpc_dironly = B_TRUE;
/* overwrite "-r" option */
zpc.zpc_recursive = B_FALSE;
break;
case 'k':
zpc.zpc_keep_projid = B_TRUE;
break;
case 'p': {
char *endptr;
errno = 0;
zpc.zpc_expected_projid = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("project ID must be less than "
"%u\n"), UINT32_MAX);
usage(B_FALSE);
}
if (zpc.zpc_expected_projid >= UINT32_MAX) {
(void) fprintf(stderr,
gettext("invalid project ID\n"));
usage(B_FALSE);
}
break;
}
case 'r':
zpc.zpc_recursive = B_TRUE;
/* overwrite "-d" option */
zpc.zpc_dironly = B_FALSE;
break;
case 's':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_set_flag = B_TRUE;
zpc.zpc_op = ZFS_PROJECT_OP_SET;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (zpc.zpc_op == ZFS_PROJECT_OP_DEFAULT) {
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID)
zpc.zpc_op = ZFS_PROJECT_OP_SET;
else
zpc.zpc_op = ZFS_PROJECT_OP_LIST;
}
switch (zpc.zpc_op) {
case ZFS_PROJECT_OP_LIST:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CHECK:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CLEAR:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID) {
(void) fprintf(stderr,
gettext("'-p' is useless together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_SET:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless for set project ID and/or "
"inherit flag\n"));
usage(B_FALSE);
}
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
default:
ASSERT(0);
break;
}
argv += optind;
argc -= optind;
if (argc == 0) {
(void) fprintf(stderr,
gettext("missing file or directory target(s)\n"));
usage(B_FALSE);
}
for (int i = 0; i < argc; i++) {
int err;
err = zfs_project_handle(argv[i], &zpc);
if (err && !ret)
ret = err;
}
return (ret);
}
static int
zfs_do_wait(int argc, char **argv)
{
boolean_t enabled[ZFS_WAIT_NUM_ACTIVITIES];
int error, i;
int c;
/* By default, wait for all types of activity. */
for (i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++)
enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "t:")) != -1) {
switch (c) {
case 't':
/* Reset activities array */
memset(&enabled, 0, sizeof (enabled));
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_subopts[
ZFS_WAIT_NUM_ACTIVITIES] = { "deleteq" };
for (i = 0; i < ARRAY_SIZE(col_subopts); ++i)
if (strcmp(tok, col_subopts[i]) == 0) {
enabled[i] = B_TRUE;
goto found;
}
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"), tok);
usage(B_FALSE);
found:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argv += optind;
argc -= optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'filesystem' "
"argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zfs_handle_t *zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (int i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!enabled[i])
continue;
error = zfs_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zfs_close(zhp);
return (error);
}
/*
* Display version message
*/
static int
zfs_do_version(int argc, char **argv)
{
(void) argc, (void) argv;
return (zfs_version_print() != 0);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
const char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* The 'umount' command is an alias for 'unmount'
*/
if (strcmp(cmdname, "umount") == 0)
cmdname = "unmount";
/*
* The 'recv' command is an alias for 'receive'
*/
if (strcmp(cmdname, "recv") == 0)
cmdname = "receive";
/*
* The 'snap' command is an alias for 'snapshot'
*/
if (strcmp(cmdname, "snap") == 0)
cmdname = "snapshot";
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) ||
(strcmp(cmdname, "--help") == 0))
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zfs_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
libzfs_print_on_error(g_zfs, B_TRUE);
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
libzfs_mnttab_cache(g_zfs, B_TRUE);
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=') != NULL) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
/*
* zfs zone nsfile filesystem
* zfs unzone nsfile filesystem
*
* Attach the given dataset to, or detach it from, the given user namespace.
*/
#ifdef __linux__
static int
zfs_do_zone_impl(int argc, char **argv, boolean_t attach)
{
zfs_handle_t *zhp;
int ret;
if (argc < 3) {
(void) fprintf(stderr, gettext("missing argument(s)\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
ret = (zfs_userns(zhp, argv[1], attach) != 0);
zfs_close(zhp);
return (ret);
}
static int
zfs_do_zone(int argc, char **argv)
{
return (zfs_do_zone_impl(argc, argv, B_TRUE));
}
static int
zfs_do_unzone(int argc, char **argv)
{
return (zfs_do_zone_impl(argc, argv, B_FALSE));
}
#endif
#ifdef __FreeBSD__
#include <sys/jail.h>
#include <jail.h>
/*
* Attach/detach the given dataset to/from the given jail
*/
static int
zfs_do_jail_impl(int argc, char **argv, boolean_t attach)
{
zfs_handle_t *zhp;
int jailid, ret;
/* check number of arguments */
if (argc < 3) {
(void) fprintf(stderr, gettext("missing argument(s)\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
jailid = jail_getid(argv[1]);
if (jailid < 0) {
(void) fprintf(stderr, gettext("invalid jail id or name\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
ret = (zfs_jail(zhp, jailid, attach) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs jail jailid filesystem
*
* Attach the given dataset to the given jail
*/
static int
zfs_do_jail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_TRUE));
}
/*
* zfs unjail jailid filesystem
*
* Detach the given dataset from the given jail
*/
static int
zfs_do_unjail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_FALSE));
}
#endif
diff --git a/sys/contrib/openzfs/cmd/zilstat.in b/sys/contrib/openzfs/cmd/zilstat.in
new file mode 100755
index 000000000000..cf4e2e0dd0c8
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zilstat.in
@@ -0,0 +1,467 @@
+#!/usr/bin/env @PYTHON_SHEBANG@
+#
+# Print out ZIL (ZFS Intent Log) statistics. This information is
+# available through the zil kstat.
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# This script must remain compatible with Python 3.6+.
+#
+
+import sys
+import subprocess
+import time
+import copy
+import os
+import re
+import signal
+from collections import defaultdict
+import argparse
+from argparse import RawTextHelpFormatter
+
+cols = {
+ # hdr: [size, scale, kstat name]
+ "time": [8, -1, "time"],
+ "pool": [12, -1, "pool"],
+ "ds": [12, -1, "dataset_name"],
+ "obj": [12, -1, "objset"],
+ "zcc": [10, 1000, "zil_commit_count"],
+ "zcwc": [10, 1000, "zil_commit_writer_count"],
+ "ziic": [10, 1000, "zil_itx_indirect_count"],
+ "zic": [10, 1000, "zil_itx_count"],
+ "ziib": [10, 1024, "zil_itx_indirect_bytes"],
+ "zicc": [10, 1000, "zil_itx_copied_count"],
+ "zicb": [10, 1024, "zil_itx_copied_bytes"],
+ "zinc": [10, 1000, "zil_itx_needcopy_count"],
+ "zinb": [10, 1024, "zil_itx_needcopy_bytes"],
+ "zimnc": [10, 1000, "zil_itx_metaslab_normal_count"],
+ "zimnb": [10, 1024, "zil_itx_metaslab_normal_bytes"],
+ "zimsc": [10, 1000, "zil_itx_metaslab_slog_count"],
+ "zimsb": [10, 1024, "zil_itx_metaslab_slog_bytes"],
+}
+
+hdr = ["time", "pool", "ds", "obj", "zcc", "zcwc", "ziic", "zic", "ziib", \
+ "zicc", "zicb", "zinc", "zinb", "zimnc", "zimnb", "zimsc", "zimsb"]
+
+ghdr = ["time", "zcc", "zcwc", "ziic", "zic", "ziib", "zicc", "zicb",
+ "zinc", "zinb", "zimnc", "zimnb", "zimsc", "zimsb"]
+
+cmd = ("Usage: zilstat [-v] [-a|-p pool|-d dataset[,...]] [-f fields] [-s sep] [-i interval]")
+
+curr = {}
+diff = {}
+kstat = {}
+ds_pairs = {}
+pool_name = None
+dataset_name = None
+interval = 0
+sep = " "
+gFlag = True
+dsFlag = False
+
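+# Right-align 'num' in a field 'sz' characters wide, dividing by 'scale'
+# (1000 or 1024) and appending a K/M/G/... suffix as needed; a scale of
+# -1 prints the value unmodified.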
+def prettynum(sz, scale, num=0):
+ suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
+ index = 0
+ save = 0
+
+ if scale == -1:
+ return "%*s" % (sz, num)
+
+ # Rounding error, return 0
+ elif 0 < num < 1:
+ num = 0
+
+ while num > scale and index < 5:
+ save = num
+ num = num / scale
+ index += 1
+
+ if index == 0:
+ return "%*d" % (sz, num)
+
+ if (save / scale) < 10:
+ return "%*.1f%s" % (sz - 1, num, suffix[index])
+ else:
+ return "%*d%s" % (sz - 1, num, suffix[index])
+
+def print_header():
+ global hdr
+ global sep
+ for col in hdr:
+ new_col = col
+ if interval > 0 and col not in ['time', 'pool', 'ds', 'obj']:
+ new_col += "/s"
+ sys.stdout.write("%*s%s" % (cols[col][0], new_col, sep))
+ sys.stdout.write("\n")
+
+def print_values(v):
+ global hdr
+ global sep
+ for col in hdr:
+ val = v[cols[col][2]]
+ if col not in ['time', 'pool', 'ds', 'obj'] and interval > 0:
+ val = v[cols[col][2]] // interval
+ sys.stdout.write("%s%s" % (
+ prettynum(cols[col][0], cols[col][1], val), sep))
+ sys.stdout.write("\n")
+
+def print_dict(d):
+ for pool in d:
+ for objset in d[pool]:
+ print_values(d[pool][objset])
+
+def detailed_usage():
+ sys.stderr.write("%s\n" % cmd)
+ sys.stderr.write("Field definitions are as follows:\n")
+ for key in cols:
+ sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
+ sys.stderr.write("\n")
+ sys.exit(0)
+
+def init():
+ global pool_name
+ global dataset_name
+ global interval
+ global hdr
+ global curr
+ global gFlag
+ global sep
+
+ curr = dict()
+
+ parser = argparse.ArgumentParser(description='Program to print zilstats',
+ add_help=True,
+ formatter_class=RawTextHelpFormatter,
+ epilog="\nUsage Examples\n"\
+ "Note: Global ZIL stats are shown by default,"\
+ " if none of the -a|-p|-d options is provided\n"\
+ "\tzilstat -a\n"\
+ '\tzilstat -v\n'\
+ '\tzilstat -p tank\n'\
+ '\tzilstat -d tank/d1,tank/d2,tank/zv1\n'\
+ '\tzilstat -i 1\n'\
+ '\tzilstat -s \"***\"\n'\
+ '\tzilstat -f zcwc,zimnb,zimsb\n')
+
+ parser.add_argument(
+ "-v", "--verbose",
+ action="store_true",
+ help="List field headers and definitions"
+ )
+
+ pool_grp = parser.add_mutually_exclusive_group()
+
+ pool_grp.add_argument(
+ "-a", "--all",
+ action="store_true",
+ dest="all",
+ help="Print all dataset stats"
+ )
+
+ pool_grp.add_argument(
+ "-p", "--pool",
+ type=str,
+ help="Print stats for all datasets of a specified pool"
+ )
+
+ pool_grp.add_argument(
+ "-d", "--dataset",
+ type=str,
+ help="Print given dataset(s) (Comma separated)"
+ )
+
+ parser.add_argument(
+ "-f", "--columns",
+ type=str,
+ help="Specify specific fields to print (see -v)"
+ )
+
+ parser.add_argument(
+ "-s", "--separator",
+ type=str,
+ help="Override default field separator with custom "
+ "character or string"
+ )
+
+ parser.add_argument(
+ "-i", "--interval",
+ type=int,
+ dest="interval",
+ help="Print stats at the specified interval"
+ " (in seconds)"
+ )
+
+ parsed_args = parser.parse_args()
+
+ if parsed_args.verbose:
+ detailed_usage()
+
+ if parsed_args.all:
+ gFlag = False
+
+ if parsed_args.interval:
+ interval = parsed_args.interval
+
+ if parsed_args.pool:
+ pool_name = parsed_args.pool
+ gFlag = False
+
+ if parsed_args.dataset:
+ dataset_name = parsed_args.dataset
+ gFlag = False
+
+ if parsed_args.separator:
+ sep = parsed_args.separator
+
+ if gFlag:
+ hdr = ghdr
+
+ if parsed_args.columns:
+ hdr = parsed_args.columns.split(",")
+
+ invalid = []
+ for ele in hdr:
+ if gFlag and ele not in ghdr:
+ invalid.append(ele)
+ elif ele not in cols:
+ invalid.append(ele)
+
+ if len(invalid) > 0:
+ sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
+ sys.exit(1)
+
+ if pool_name and dataset_name:
+ print ("Error: Cannot filter by both dataset and pool")
+ sys.exit(1)
+
+def FileCheck(fname):
+ try:
+ return (open(fname))
+ except IOError:
+ print ("Unable to open zilstat proc file: " + fname)
+ sys.exit(1)
+
+if sys.platform.startswith('freebsd'):
+ # Requires py-sysctl on FreeBSD
+ import sysctl
+
+ def kstat_update(pool = None, objid = None):
+ global kstat
+ kstat = {}
+ if not pool:
+ file = "kstat.zfs.misc.zil"
+ k = [ctl for ctl in sysctl.filter(file) \
+ if ctl.type != sysctl.CTLTYPE_NODE]
+ kstat_process_str(k, file, "GLOBAL", len(file + "."))
+ elif objid:
+ file = "kstat.zfs." + pool + ".dataset.objset-" + objid
+ k = [ctl for ctl in sysctl.filter(file) if ctl.type \
+ != sysctl.CTLTYPE_NODE]
+ kstat_process_str(k, file, objid, len(file + "."))
+ else:
+ file = "kstat.zfs." + pool + ".dataset"
+ zil_start = len(file + ".")
+ obj_start = len("kstat.zfs." + pool + ".")
+ k = [ctl for ctl in sysctl.filter(file)
+ if ctl.type != sysctl.CTLTYPE_NODE]
+ for s in k:
+ if not s or (s.name.find("zil") == -1 and \
+ s.name.find("dataset_name") == -1):
+ continue
+ name, value = s.name, s.value
+ objid = re.findall(r'0x[0-9A-F]+', \
+ name[obj_start:], re.I)[0]
+ if objid not in kstat:
+ kstat[objid] = dict()
+ zil_start = len(file + ".objset-" + \
+ objid + ".")
+ kstat[objid][name[zil_start:]] = value \
+ if (name.find("dataset_name")) \
+ else int(value)
+
+ def kstat_process_str(k, file, objset = "GLOBAL", zil_start = 0):
+ global kstat
+ if not k:
+ print("Unable to process kstat for: " + file)
+ sys.exit(1)
+ kstat[objset] = dict()
+ for s in k:
+ if not s or (s.name.find("zil") == -1 and \
+ s.name.find("dataset_name") == -1):
+ continue
+ name, value = s.name, s.value
+ kstat[objset][name[zil_start:]] = value \
+ if (name.find("dataset_name")) else int(value)
+
+elif sys.platform.startswith('linux'):
+ def kstat_update(pool = None, objid = None):
+ global kstat
+ kstat = {}
+ if not pool:
+ k = [line.strip() for line in \
+ FileCheck("/proc/spl/kstat/zfs/zil")]
+ kstat_process_str(k, "/proc/spl/kstat/zfs/zil")
+ elif objid:
+ file = "/proc/spl/kstat/zfs/" + pool + "/objset-" + objid
+ k = [line.strip() for line in FileCheck(file)]
+ kstat_process_str(k, file, objid)
+ else:
+ if not os.path.exists(f"/proc/spl/kstat/zfs/{pool}"):
+ print("Pool \"" + pool + "\" does not exist, exiting")
+ sys.exit(1)
+ objsets = os.listdir(f'/proc/spl/kstat/zfs/{pool}')
+ for objid in objsets:
+ if objid.find("objset-") == -1:
+ continue
+ file = "/proc/spl/kstat/zfs/" + pool + "/" + objid
+ k = [line.strip() for line in FileCheck(file)]
+ kstat_process_str(k, file, objid.replace("objset-", ""))
+
+ def kstat_process_str(k, file, objset = "GLOBAL", zil_start = 0):
+ global kstat
+ if not k:
+ print("Unable to process kstat for: " + file)
+ sys.exit(1)
+
+ kstat[objset] = dict()
+ for s in k:
+ if not s or (s.find("zil") == -1 and \
+ s.find("dataset_name") == -1):
+ continue
+ name, unused, value = s.split()
+ kstat[objset][name] = value \
+ if (name == "dataset_name") else int(value)
+
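+# Rebuild 'curr' from kstats: global ZIL stats by default, a single pool
+# with -p, the listed datasets with -d, or every imported pool with -a.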
+def zil_process_kstat():
+ global curr, pool_name, dataset_name, dsFlag, ds_pairs
+ curr.clear()
+ if gFlag == True:
+ kstat_update()
+ zil_build_dict()
+ else:
+ if pool_name:
+ kstat_update(pool_name)
+ zil_build_dict(pool_name)
+ elif dataset_name:
+ if dsFlag == False:
+ dsFlag = True
+ datasets = dataset_name.split(',')
+ ds_pairs = defaultdict(list)
+ for ds in datasets:
+ try:
+ objid = subprocess.check_output(['zfs',
+ 'list', '-Hpo', 'objsetid', ds], \
+ stderr=subprocess.DEVNULL) \
+ .decode('utf-8').strip()
+ except subprocess.CalledProcessError as e:
+ print("Command: \"zfs list -Hpo objsetid "\
+ + str(ds) + "\" failed with error code: "\
+ + str(e.returncode))
+ print("Please make sure that dataset \""\
+ + str(ds) + "\" exists")
+ sys.exit(1)
+ if not objid:
+ continue
+ ds_pairs[ds.split('/')[0]]. \
+ append(hex(int(objid)))
+ for pool, objids in ds_pairs.items():
+ for objid in objids:
+ kstat_update(pool, objid)
+ zil_build_dict(pool)
+ else:
+ try:
+ pools = subprocess.check_output(['zpool', 'list', '-Hpo',\
+ 'name']).decode('utf-8').split()
+ except subprocess.CalledProcessError as e:
+ print("Command: \"zpool list -Hpo name\" failed with error"\
+ "code: " + str(e.returncode))
+ sys.exit(1)
+ for pool in pools:
+ kstat_update(pool)
+ zil_build_dict(pool)
+
+def calculate_diff():
+ global curr, diff
+ prev = copy.deepcopy(curr)
+ zil_process_kstat()
+ diff = copy.deepcopy(curr)
+ for pool in curr:
+ for objset in curr[pool]:
+ for col in hdr:
+ if col not in ['time', 'pool', 'ds', 'obj']:
+ key = cols[col][2]
+ # If prev is empty, this is the
+ # first time we are here
+ if not prev:
+ diff[pool][objset][key] = 0
+ else:
+ diff[pool][objset][key] \
+ = curr[pool][objset][key] \
+ - prev[pool][objset][key]
+
+def zil_build_dict(pool = "GLOBAL"):
+ global kstat
+ for objset in kstat:
+ for key in kstat[objset]:
+ val = kstat[objset][key]
+ if pool not in curr:
+ curr[pool] = dict()
+ if objset not in curr[pool]:
+ curr[pool][objset] = dict()
+ curr[pool][objset][key] = val
+ curr[pool][objset]["pool"] = pool
+ curr[pool][objset]["objset"] = objset
+ curr[pool][objset]["time"] = time.strftime("%H:%M:%S", \
+ time.localtime())
+
+def sign_handler_epipe(sig, frame):
+ print("Caught EPIPE signal: " + str(frame))
+ print("Exitting...")
+ sys.exit(0)
+
+def main():
+ global interval
+ global curr
+ hprint = False
+ init()
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGPIPE, sign_handler_epipe)
+
+ if interval > 0:
+ while True:
+ calculate_diff()
+ if not diff:
+ print ("Error: No stats to show")
+ sys.exit(0)
+ if hprint == False:
+ print_header()
+ hprint = True
+ print_dict(diff)
+ time.sleep(interval)
+ else:
+ zil_process_kstat()
+ if not curr:
+ print ("Error: No stats to show")
+ sys.exit(0)
+ print_header()
+ print_dict(curr)
+
+if __name__ == '__main__':
+ main()
+
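
For orientation, a minimal standalone Python sketch of the sampling model used by the zilstat script above: parse "name type value" kstat lines, then report per-interval deltas the way kstat_process_str() and calculate_diff() do. The kstat names and numbers below are invented for illustration.

# Illustrative sketch only: parse Linux-style "name type value" kstat lines
# and compute per-interval deltas, mirroring the script's calculate_diff().
def parse_kstat(lines):
    stats = {}
    for line in lines:
        if "zil" not in line and "dataset_name" not in line:
            continue
        name, _ktype, value = line.split()
        stats[name] = value if name == "dataset_name" else int(value)
    return stats

prev = parse_kstat(["zil_commit_count 4 100", "zil_itx_count 4 250"])
curr = parse_kstat(["zil_commit_count 4 140", "zil_itx_count 4 300"])
delta = {k: curr[k] - prev.get(k, 0) for k in curr}
print(delta)  # {'zil_commit_count': 40, 'zil_itx_count': 50}
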
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 13a51691fa78..b5b0beef5324 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -1,10958 +1,10958 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, Klara Inc.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "statcommon.h"
libzfs_handle_t *g_zfs;
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_checkpoint(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
static zpool_compat_status_t zpool_do_load_compat(
const char *, boolean_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_CHECKPOINT,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_REPLACE,
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
HELP_RESILVER,
HELP_TRIM,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostat"
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
NULL},
};
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just do a highbit64(a)
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
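
The IOS_HISTO_IDX() arithmetic above can be illustrated with a small Python sketch (int.bit_length() standing in for highbit64()); only the two histogram flag values from the enum are modeled here.

# Sketch: recover the enum index from a single-bit flag word,
# mirroring IOS_HISTO_IDX(a) = highbit64(a & IOS_ANYHISTO_M) - 1.
IOS_L_HISTO, IOS_RQ_HISTO = 3, 4
IOS_ANYHISTO_M = (1 << IOS_L_HISTO) | (1 << IOS_RQ_HISTO)

def ios_histo_idx(flags):
    return (flags & IOS_ANYHISTO_M).bit_length() - 1  # highbit64() analogue

print(ios_histo_idx(1 << IOS_RQ_HISTO))  # 4, i.e. IOS_RQ_HISTO
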
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "version", zpool_do_version, HELP_VERSION },
{ NULL },
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
{ "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ "trim", zpool_do_trim, HELP_TRIM },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
#define VDEV_ALLOC_CLASS_LOGS "logs"
static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [-nF] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [[-n] interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_OFFLINE:
return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
case HELP_ONLINE:
return (gettext("\tonline [-e] <pool> <device> ...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-fsw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
return (gettext("\tstatus [-c [script1,script2,...]] "
"[-igLpPstvxD] [-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool> \n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
default:
__builtin_unreachable();
}
}
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
uint_t children = 0;
nvlist_t **child;
uint_t i;
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (children == 0) {
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
VDEV_NAME_PATH);
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
strcmp(path, VDEV_TYPE_HOLE) != 0)
fnvlist_add_boolean(res, path);
free(path);
return;
}
for (i = 0; i < children; i++) {
zpool_collect_leaves(zhp, child[i], res);
}
}
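
zpool_collect_leaves() above records only leaf vdevs and skips indirect and hole placeholders. A rough Python analogue of that recursion, over an invented nested structure, might look like this:

# Sketch: collect leaf names from a nested vdev-like tree, skipping
# placeholder types, analogous to zpool_collect_leaves(). The tree
# shape and device names are invented for illustration.
def collect_leaves(node, out):
    children = node.get("children", [])
    if not children:
        if node["name"] not in ("indirect", "hole"):
            out.append(node["name"])
        return
    for child in children:
        collect_leaves(child, out)

tree = {"name": "root", "children": [
    {"name": "mirror-0", "children": [{"name": "da0"}, {"name": "da1"}]},
    {"name": "hole"},
]}
leaves = []
collect_leaves(tree, leaves)
print(leaves)  # ['da0', 'da1']
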
/*
* Callback routine that will print out a pool property value.
*/
static int
print_pool_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Callback routine that will print out a vdev property value.
*/
static int
print_vdev_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
if (vdev_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (vdev_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", vdev_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp,
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
if (current_prop_type == ZFS_TYPE_POOL) {
(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
(void) fprintf(fp, "\t%-19s ", "feature@...");
(void) fprintf(fp, "YES "
"disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties "
"must be appended with a feature name.\n"
"See zpool-features(7).\n"));
} else if (current_prop_type == ZFS_TYPE_VDEV) {
(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
}
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -w Wait. Blocks until initializing has completed.
*/
int
zpool_do_initialize(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_CANCEL;
break;
case 's':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
} else {
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
zpool_close(zhp);
return (err);
}
/*
* print a pool vdev config for dry runs
*/
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
const char *match, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
boolean_t printed = B_FALSE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
return;
}
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
char *class = (char *)"";
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole == B_TRUE) {
continue;
}
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
class = (char *)VDEV_ALLOC_BIAS_LOG;
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
if (strcmp(match, class) != 0)
continue;
if (!printed && name != NULL) {
(void) printf("\t%*s%s\n", indent, "", name);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, "",
name_flags);
free(vname);
}
}
/*
* Print the list of l2cache devices for dry runs.
*/
static void
print_cache_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "cache");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
/*
* Print the list of spares for dry runs.
*/
static void
print_spare_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "spares");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
nvlist_t *proplist;
const char *normnm;
char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
const char *cname =
zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
(!zpool_prop_feature(propname) &&
!zpool_prop_vdev(propname))) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool or vdev property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
/*
* if version is specified, only "legacy" compatibility
* may be requested
*/
if ((prop == ZPOOL_PROP_COMPATIBILITY &&
strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
nvlist_exists(proplist, cname) &&
strcmp(fnvlist_lookup_string(proplist, cname),
ZPOOL_COMPAT_LEGACY) != 0)) {
(void) fprintf(stderr, gettext("when 'version' is "
"specified, the 'compatibility' feature may only "
"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
return (2);
}
if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
zfs_prop_t fsprop = zfs_name_to_prop(propname);
if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
B_FALSE)) {
normnm = zfs_prop_to_name(fsprop);
} else if (zfs_prop_user(propname) ||
zfs_prop_userquota(propname)) {
normnm = propname;
} else {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid filesystem property\n"), propname);
return (2);
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
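
add_prop_list() above enforces two mutual-exclusion rules for pool properties: 'version' cannot be combined with feature@ properties, and when 'version' is given, 'compatibility' may only be 'legacy'. A minimal Python sketch of just those checks, with hypothetical property values:

# Sketch of the mutual-exclusion checks in add_prop_list(); property
# names and values below are hypothetical.
def validate(props):
    has_version = "version" in props
    has_feature = any(k.startswith("feature@") for k in props)
    if has_version and has_feature:
        return "'feature@' and 'version' cannot be specified together"
    if has_version and props.get("compatibility", "legacy") != "legacy":
        return "with 'version', 'compatibility' may only be 'legacy'"
    return None

print(validate({"version": "28", "feature@lz4_compress": "enabled"}))
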
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, const char *propval,
nvlist_t **props)
{
char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
*
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by make_root_vdev(), which constructs the nvlist needed to pass to
* libzfs.
*/
int
zpool_do_add(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to make_root_vdev for processing */
nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child, **sparechild;
uint_t l2children, sparechildren, c;
char *vname;
boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
name_flags | VDEV_NAME_TYPE_ID);
print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
/* print other classes: 'dedup', 'special', and 'log' */
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", poolnvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
}
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", poolnvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
}
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
/* And finally the spares */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
hadspare = B_TRUE;
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
if (!hadspare)
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev from the pool.
*/
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
boolean_t stop = B_FALSE;
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if (stop && noop) {
(void) fprintf(stderr, gettext("stop request ignored\n"));
return (0);
}
if (stop) {
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
usage(B_FALSE);
}
for (i = 1; i < argc; i++) {
if (noop) {
uint64_t size;
if (zpool_vdev_indirect_size(zhp, argv[i],
&size) != 0) {
ret = 1;
break;
}
if (parsable) {
(void) printf("%s %llu\n",
argv[i], (unsigned long long)size);
} else {
char valstr[32];
zfs_nicenum(size, valstr,
sizeof (valstr));
(void) printf("Memory that will be "
"used after removing %s: %s\n",
argv[i], valstr);
}
} else {
if (zpool_vdev_remove(zhp, argv[i]) != 0)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
return (ret);
}
/*
* Return 1 if a vdev is active (being used in a pool)
* Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
*
* This is useful for checking if a disk in an active pool is offlined or
* faulted.
*/
static int
vdev_is_active(char *vdev_path)
{
int fd;
fd = open(vdev_path, O_EXCL);
if (fd < 0) {
return (1); /* can't open O_EXCL - disk is active */
}
close(fd);
return (0); /* disk is inactive in the pool */
}
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for the vdevs which are members of
* the exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
struct stat st;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(vdev, argv[0], sizeof (vdev));
if (vdev[0] != '/' && stat(vdev, &st) != 0) {
int error;
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat(vdev, &st) != 0)) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try specifying absolute "
"path instead\n"), argv[0]);
return (1);
}
}
if ((fd = open(vdev, O_RDWR)) < 0) {
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
vdev, strerror(errno));
return (1);
}
/*
* Flush all dirty pages for the block device. This should not be
* fatal when the device does not support BLKFLSBUF as would be the
* case for a file vdev.
*/
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0) {
(void) fprintf(stderr,
gettext("failed to read label from %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
/*
* We allow the user to call 'zpool offline -f'
* on an offlined disk in an active pool. We can check if
* the disk is online by calling vdev_is_active().
*/
if (force && !vdev_is_active(vdev))
break;
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\""),
vdev, zpool_pool_state_to_name(state), name);
if (force) {
(void) fprintf(stderr, gettext(
". Offline the disk first to clear its label."));
}
printf("\n");
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
* Once we get the nvlist back from make_root_vdev(), we either print out the
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
*/
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_pool_features = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *compat = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_pool_features = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 10);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_pool_features = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
if (zpool_name_to_prop(optarg) ==
ZPOOL_PROP_COMPATIBILITY)
compat = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to make_root_vdev for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
* '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
int count = 0;
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message, then run
* through all the vdevs in the list and print them out in an
* appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
print_vdev_tree(NULL, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
print_cache_list(nvroot, 0);
print_spare_list(nvroot, 0);
ret = 0;
} else {
/*
* Load in feature set.
* Note: if the compatibility property is not given, we'll have
* NULL, which means 'all features'.
*/
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
goto errout;
/*
* props contains list of features to enable.
* For each feature:
* - remove it if feature@name=disabled
* - leave it there if feature@name=enabled
* - add it if:
* - enable_pool_features (ie: no '-d' or '-o version')
* - it's supported by the kernel module
* - it's in the requested feature set
* - warn if it's enabled but not in compat
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval,
ZFS_FEATURE_DISABLED) == 0) {
(void) nvlist_remove_all(props,
propname);
} else if (strcmp(propval,
ZFS_FEATURE_ENABLED) == 0 &&
!requested_features[i]) {
(void) fprintf(stderr, gettext(
"Warning: feature \"%s\" enabled "
"but is not in specified "
"'compatibility' feature set.\n"),
feat->fi_uname);
}
} else if (
enable_pool_features &&
feat->fi_zfs_mod_supported &&
requested_features[i]) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0) {
ret = zfs_share(pool, NULL);
zfs_commit_shares(NULL);
}
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
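
The feature-selection rules listed in the comment inside zpool_do_create() above (drop feature@name=disabled, keep feature@name=enabled, add features that are module-supported and in the requested compatibility set) can be summarized with a small Python sketch; the feature names and support flags are hypothetical, and the "warn if enabled but not in compat" case is omitted.

# Sketch of the feature-selection rules from zpool_do_create();
# feature names, module support, and the requested set are made up.
def select_features(props, supported, requested, enable_all):
    out = dict(props)
    for feat in supported:
        key = "feature@" + feat
        if key in out:
            if out[key] == "disabled":
                del out[key]          # explicit disable wins
        elif enable_all and supported[feat] and feat in requested:
            out[key] = "enabled"      # implied by the requested set
    return out

props = {"feature@async_destroy": "disabled"}
supported = {"async_destroy": True, "lz4_compress": True}
print(select_features(props, supported, {"lz4_compress"}, True))
# {'feature@lz4_compress': 'enabled'}
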
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the export */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
typedef struct export_cbdata {
boolean_t force;
boolean_t hardforce;
} export_cbdata_t;
/*
* Export one pool
*/
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
if (zpool_disable_datasets(zhp, cb->force) != 0)
return (1);
/* The history must be logged as part of the export */
log_history = B_FALSE;
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
argc -= optind;
argv += optind;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_export_one, &cb);
return (ret);
}
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
static const char *const subtypes[] =
{ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
max = MAX(strlen(name) + depth, max);
free(name);
nvlist_t **child;
uint_t children;
for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
if (nvlist_lookup_nvlist_array(nv, subtypes[i],
&child, &children) == 0)
for (uint_t c = 0; c < children; ++c)
max = MAX(max_width(zhp, child[c], depth + 2,
max, name_flags), max);
return (max);
}
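
max_width() above recurses over spares, cache devices, and children to size the device name column. A Python sketch of the same depth-weighted maximum, over an invented tree:

# Sketch: widest "indent + name" over a nested tree, as max_width() does.
# Tree contents are illustrative only.
def max_width(node, depth=0, cur=0):
    cur = max(cur, len(node["name"]) + depth)
    for child in node.get("children", []):
        cur = max(cur, max_width(child, depth + 2, cur))
    return cur

tree = {"name": "tank", "children": [
    {"name": "raidz1-0", "children": [{"name": "da0p3"}, {"name": "da1p3"}]},
]}
print(max_width(tree))  # 10: len("raidz1-0") plus its depth of 2
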
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_status;
boolean_t cb_print_slow_ios;
boolean_t cb_print_vdev_init;
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
} status_cbdata_t;
/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static boolean_t
is_blank_str(const char *str)
{
for (; str != NULL && *str != '\0'; ++str)
if (!isblank(*str))
return (B_FALSE);
return (B_TRUE);
}
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
{
vdev_cmd_data_t *data;
int i, j;
const char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (val == NULL || is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
fputs(" ", stdout);
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
fputs(" ", stdout);
val = data->lines[j];
fputs(val ?: "", stdout);
}
break;
}
}
/*
* Print vdev initialization status for leaves
*/
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_initialize_action_time;
int initialize_pct = 100;
if (vs->vs_initialize_state !=
VDEV_INITIALIZE_COMPLETE) {
initialize_pct = (vs->vs_initialize_bytes_done *
100 / (vs->vs_initialize_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_initialize_state) {
case VDEV_INITIALIZE_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_INITIALIZE_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_INITIALIZE_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% initialized%s)"),
initialize_pct, zbuf);
} else {
(void) printf(gettext(" (uninitialized)"));
}
} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) printf(gettext(" (initializing)"));
}
}
/*
* Print vdev TRIM status for leaves
*/
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_trim_action_time;
int trim_pct = 100;
if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
trim_pct = (vs->vs_trim_bytes_done *
100 / (vs->vs_trim_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_trim_state) {
case VDEV_TRIM_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_TRIM_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_TRIM_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% trimmed%s)"),
trim_pct, zbuf);
} else if (vs->vs_trim_notsup) {
(void) printf(gettext(" (trim unsupported)"));
} else {
(void) printf(gettext(" (untrimmed)"));
}
} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
(void) printf(gettext(" (trimming)"));
}
}
/*
* Return the color associated with a health string. This includes returning
* NULL for no color change.
*/
static const char *
health_str_to_color(const char *health)
{
if (strcmp(health, gettext("FAULTED")) == 0 ||
strcmp(health, gettext("SUSPENDED")) == 0 ||
strcmp(health, gettext("UNAVAIL")) == 0) {
return (ANSI_RED);
}
if (strcmp(health, gettext("OFFLINE")) == 0 ||
strcmp(health, gettext("DEGRADED")) == 0 ||
strcmp(health, gettext("REMOVED")) == 0) {
return (ANSI_YELLOW);
}
return (NULL);
}
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
nvlist_t **child, *root;
uint_t c, i, vsc, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
char *type;
char *path = NULL;
const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
* For hot spares, we use the terms 'INUSE' and 'AVAIL' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = gettext("INUSE");
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = gettext("AVAIL");
}
printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
if (vs->vs_read_errors)
rcolor = ANSI_RED;
if (vs->vs_write_errors)
wcolor = ANSI_RED;
if (vs->vs_checksum_errors)
ccolor = ANSI_RED;
if (cb->cb_literal) {
fputc(' ', stdout);
printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
fputc(' ', stdout);
printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
fputc(' ', stdout);
printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
fputc(' ', stdout);
printf_color(rcolor, "%5s", rbuf);
fputc(' ', stdout);
printf_color(wcolor, "%5s", wbuf);
fputc(' ', stdout);
printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
/* Only leaf vdevs have slow IOs */
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
else
printf(" %5s", rbuf);
}
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" %s %s", gettext("was"), path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
color_start(ANSI_RED);
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ASHIFT_TOO_BIG:
(void) printf(gettext("unsupported minimum blocksize"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
color_end();
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
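		/* ashift is a power-of-two exponent, so 1 << ashift is the size in bytes */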
(void) printf(
gettext(" block size: %dB configured, %dB native"),
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
}
if (vs->vs_scan_removing != 0) {
(void) printf(gettext(" (removing)"));
} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
(void) printf(gettext(" (non-allocating)"));
}
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0) {
if (vs->vs_scan_processed != 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
} else if (vs->vs_resilver_deferred) {
(void) printf(gettext(" (awaiting resilver)"));
}
}
/* The top-level vdevs have the rebuild stats */
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
children == 0) {
if (vs->vs_rebuild_processed != 0) {
(void) printf(gettext(" (resilvering)"));
}
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
/* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
/* Only print normal classes here */
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
/* Provide vdev_rebuild_stats to children if available */
if (vrs == NULL) {
(void) nvlist_lookup_uint64_array(nv,
ZPOOL_CONFIG_REBUILD_STATS,
(uint64_t **)&vrs, &i);
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare, vrs);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
char *type, *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print specialized class vdevs.
*
* These are recorded as top level vdevs in the main pool child array
* but with "is_log" set to 1 or an "alloc_bias" string. We use either
* print_status_config() or print_import_config() to print the top level
* class vdevs; any of their children (e.g. mirrored slogs) are then printed
* recursively, which works because only the top level vdev is marked.
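*
* For example (a sketch with hypothetical device names), a pool with a
* mirrored slog would print a section like:
*
*	logs
*	  mirror-1    ONLINE       0     0     0
*	    ada1      ONLINE       0     0     0
*	    ada2      ONLINE       0     0     0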
*/
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class)
{
uint_t c, children;
nvlist_t **child;
boolean_t printed = B_FALSE;
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
char *bias = NULL;
char *type = NULL;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
bias = (char *)VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
(void) printf("\t%s\t\n", gettext(class));
printed = B_TRUE;
}
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE, NULL);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
/*
* Display the status for the given pool.
*/
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
char *name;
uint64_t guid;
uint64_t hostid = 0;
const char *msgid;
const char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
char *comment;
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices contains"
" corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(
gettext(" status: The pool data is corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n\t"
"(Note that they may be intentionally disabled "
"if the\n\t'compatibility' property is set.)\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
"the file(s) indicated by the 'compatibility'\n"
"property.\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"requested by the 'compatibility' property.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n\tbe safely imported "
"when the system hostid is not set.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
"by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
"be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices were "
"being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric identifier, "
"though\n\tsome features will not be available "
"without an explicit 'zpool upgrade'.\n"));
} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric\n\tidentifier, "
"though the file(s) indicated by its "
"'compatibility'\n\tproperty cannot be parsed at "
"this time.\n"));
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier and\n\tthe '-f' flag.\n"));
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext(" action: The pool can "
"be imported using its name or numeric "
"identifier,\n\thowever there is a compat"
"ibility issue which should be corrected"
"\n\tby running 'zpool scrub'\n"));
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext(" action: The pool can"
"not be imported with this version of ZFS "
"due to\n\tan active asynchronous destroy. "
"Revert to an earlier version\n\tand "
"allow the destroy to complete before "
"updating.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted datasets contain an on-disk "
"incompatibility, which\n\tneeds to be "
"corrected. Backup these datasets to new "
"encrypted datasets\n\tand destroy the "
"old ones.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted snapshots and bookmarks contain "
"an on-disk\n\tincompatibility. This may "
"cause on-disk corruption if they are used"
"\n\twith 'zfs recv'. To correct the "
"issue, enable the bookmark_v2 feature.\n\t"
"No additional action is needed if there "
"are no encrypted snapshots or\n\t"
"bookmarks. If preserving the encrypted "
"snapshots and bookmarks is\n\trequired, "
"use a non-raw send to backup and restore "
"them. Alternately,\n\tthey may be removed"
" to resolve the incompatibility.\n"));
break;
default:
/*
* All errata must contain an action message.
*/
assert(0);
}
} else {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext(" action: The pool can be imported "
"despite missing or damaged devices. The\n\tfault "
"tolerance of the pool may be compromised if imported.\n"));
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" action: The pool cannot be "
"imported. Access the pool on a system running "
"newer\n\tsoftware, or recreate the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported. Access the pool on a system that "
"supports\n\tthe required feature(s), or recreate "
"the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported in read-write mode. Import the pool "
"with\n"
"\t\"-o readonly=on\", access the pool on a system "
"that supports the\n\trequired feature(s), or "
"recreate the pool from backup.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" action: The pool cannot be "
"imported. Attach the missing\n\tdevices and try "
"again.\n"));
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext(" action: The pool must be "
"exported from %s (hostid=%"PRIx64")\n\tbefore it "
"can be safely imported.\n"), hostname, hostid);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" action: Set a unique system "
"hostid with the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext(" action: The pool cannot be "
"imported due to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
(void) printf(gettext("comment: %s\n"), comment);
/*
* If the state is "closed" or "can't open", and the aux state
* is "corrupt data":
*/
if (((vs->vs_state == VDEV_STATE_CLOSED) ||
(vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
(vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\tThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"));
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\tThe pool may be active on "
"another system, but can be imported using\n\t"
"the '-f' flag.\n"));
}
if (msgid != NULL) {
(void) printf(gettext(
" see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to "
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
return (0);
}
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags)
{
int ret = 0;
zpool_handle_t *zhp;
const char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%"PRIx64")\nExport the pool on the other "
"system, then run 'zpool import'.\n"),
name, hostname, hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
const char *hostname = "<unknown>";
time_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%"PRIx64") at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
hostid, ctime(&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS &&
zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
ret = 1;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
!(flags & ZFS_IMPORT_ONLY) &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (ret);
}
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name,
boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) fputc('\n', stdout);
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
/*
* If we're importing from cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* to invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
} target_exists_args_t;
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
target_exists_args_t *args = data;
nvlist_t *config = zpool_get_config(zhp, NULL);
int found = 0;
if (config == NULL)
return (0);
if (args->poolname != NULL) {
char *pool_name;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&pool_name) == 0);
if (strcmp(pool_name, args->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0);
if (pool_guid == args->poolguid)
found = 1;
}
zpool_close(zhp);
return (found);
}
/*
* zpool checkpoint <pool>
* checkpoint --discard <pool>
*
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
* Checkpoints the specified pool, by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
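*
* For example (hypothetical pool name):
*
*	$ zpool checkpoint tank
*	$ zpool checkpoint --discard --wait tank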
*/
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
/* As a special case, check for use of '/' in the name */
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("'zpool checkpoint' "
"doesn't work on datasets. To save the state "
"of a dataset from a specific point in time "
"please use 'zfs snapshot'\n"));
return (1);
}
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
return (err);
}
#define CHECKPOINT_OPT 1024
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices. If importing from a cachefile config fails, then
* fallback to searching for devices only in the directories that
* exist in the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
* -D Scan for previously destroyed pools, or import all or only the
* specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
* vdevs in the FAULTED state. In other words, it does a verbatim
* import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
* -T Specify a starting txg to use for import. This is an
* intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
* -s Scan using the default search path; the libblkid cache will
* not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
* The import command scans for pools to import, and imports pools based on pool
* name and GUID. The pool can also be renamed as part of the import process.
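*
* For example (hypothetical pool names):
*
*	$ zpool import -a
*	$ zpool import tank newtank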
*/
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
boolean_t pool_specified = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
struct option long_options[] = {
{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
long_options, NULL)) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case CHECKPOINT_OPT:
flags |= ZFS_IMPORT_CHECKPOINT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
* -D The above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
pool_specified = B_TRUE;
/*
* User specified a name or guid. Ensure it's unique.
*/
target_exists_args_t search = {searchname, searchguid};
pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
}
/*
* Check the environment for the preferred search path.
*/
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir, *tmp = NULL;
envdup = strdup(env);
for (dir = strtok_r(envdup, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp)) {
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = dir;
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
idata.policy = policy;
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
if (pools != NULL && pool_exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && pool_exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
free(searchdirs);
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
/*
* If we're using the cachefile and we failed to import, then
* fallback to scanning the directory for pools that match
* those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
* in the cachefile. If we need to fallback to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
}
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
free(searchdirs);
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
* -f (undocumented) force uberblock (and config including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
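*
* For example (hypothetical pool name):
*
*	$ zpool sync
*	$ zpool sync tank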
*
*/
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_sync_one, &force);
return (ret);
}
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_namewidth;
int cb_iteration;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
vdev_cbdata_t cb_vdevs;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
{NULL}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2},
{"trim", 2}, {"rebuild", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
{NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
{"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
{NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {NULL}},
};
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
* Return the number of labels in a null-terminated name_and_columns_t
* array.
*
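* For example, iostat_top_labels[IOS_DEFAULT] above holds "capacity",
* "operations" and "bandwidth", so this returns 3.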
*/
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
* const char *foo[] = {"bar", "baz", NULL}
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
[IOS_RQ_HISTO] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
/*
* Print the column labels, i.e:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width) / columns -
slen / columns);
if (text_start < 0)
text_start = 0;
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
if (spaces_to_end < 0)
spaces_to_end = 0;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
* If the user specified the "zpool status|iostat -c" then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
printf(" %*s", column_size, buf);
}
/*
* Calculate the default vdev stats
*
* Subtract oldvs from newvs and save the resulting stats into calcvs; any
* scaling factor is applied later, when the stats are printed.
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
* to process. In order to make single uint64_t's look like arrays, we set
* __data to the stat data, and then set data = &__data with count = 1. Then,
* we can just use *data and count.
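*
* For example (a sketch), wrapping a single value:
*
*	struct stat_array sa;
*	sa.__data = 42;		/* hypothetical single stat value */
*	sa.data = &sa.__data;
*	sa.count = 1;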
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
* Helper function to lookup a uint64_t array or uint64_t value and store its
* data as a stat_array. If the nvpair is a single uint64_t value, then we make
* it look like a one element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
* You must free the returned array after you are done with it with
* free_calc_stats().
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
/* Start at 512 - req size should never be lower than this */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
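*
* A worked example with hypothetical counts: two I/Os in the
* 8ns-15ns bucket and one in the 16ns-31ns bucket average to
* (2*12ns + 1*24ns) / 3 = 16ns.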
*/
if (histo[i] != 0) {
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
{
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (int i = 0; i < ARRAY_SIZE(names); i++) {
uint64_t val = nva[i].data[0];
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
static const char *const class_name[] = {
VDEV_ALLOC_BIAS_DEDUP,
VDEV_ALLOC_BIAS_SPECIAL,
VDEV_ALLOC_CLASS_LOGS
};
/*
* Print out all the statistics for the given vdev. This can either be the
* toplevel configuration, or called recursively. If 'name' is NULL, then this
* is a verbose output, and we don't want to display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return (ret);
calcvs = safe_malloc(sizeof (*calcvs));
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
* Print the vdev name unless it is a histogram. Histograms
* display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
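/*
 * For example, a 5-second sampling interval gives a tdelta of about
 * 5 * NANOSEC, so scale is roughly 0.2 and multiplying the raw deltas
 * by it converts them into per-second rates.
 */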
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, newnv);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
/*
* print normal top-level devices
*/
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* print all other top-level devices
*/
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
char *bias = NULL;
char *type = NULL;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
bias = (char *)VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
!cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0,
class_name[n]);
}
printf("\n");
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose &&
!cb->cb_vdevs.cb_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
/*
* Return the required length of the pool/vdev name column. The minimum
* allowed width and output formatting flags must be provided.
*/
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
nvlist_t *config, *nvroot;
int width = min_width;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
size_t poolname_len = strlen(zpool_get_name(zhp));
if (verbose == B_FALSE) {
width = MAX(poolname_len, min_width);
} else {
width = MAX(poolname_len,
max_width(zhp, nvroot, 0, min_width, flags));
}
}
return (width);
}
/*
* Parse the input string, get the 'interval' and 'count' value if there is one.
*/
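/*
 * For example, "zpool iostat tank 5 10" leaves interval = 5 and
 * count = 10: the first pass below consumes the trailing "10" as the
 * interval, and the second pass shifts it into the count when it finds
 * the preceding "5".
 */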
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
* Return stat flags that are supported by all pools by both the module and
* zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
*/
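/*
 * For example, if one pool's config lacks the request-size histogram
 * nvpairs, the corresponding bit (IOS_RQ_HISTO_M) is cleared from the
 * mask, and zpool_do_iostat() will then report "-r" as unsupported.
 */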
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
/* Default stats are always supported, but for completeness... */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
/*
* Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
vdev_cbdata_t *cb = cb_data;
char *name = NULL;
int ret = 1; /* assume match */
zpool_handle_t *zhp = zhp_data;
name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
if (strcmp(name, cb->cb_names[0])) {
free(name);
name = zpool_vdev_name(g_zfs, zhp, nv, VDEV_NAME_GUID);
ret = (strcmp(name, cb->cb_names[0]) == 0);
}
free(name);
return (ret);
}
/*
* Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_names for a second... */
tmp_name = cb->cb_names;
/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_names = tmp_name;
return (ret);
}
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
is_pool_cb, name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_vdevs.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
&cb->cb_vdevs)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
&cb->cb_vdevs)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
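/*
 * For example, with argv = { "tank", "5", "10" } where neither trailing
 * argument names a vdev, argc_for_interval ends up as 2, tmpargv below
 * points at "5", and *argc is reduced to 1.
 */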
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
/*
* Floating point sleep(). Allows you to pass in a floating point value for
* seconds.
*/
static void
fsleep(float sec)
{
struct timespec req;
req.tv_sec = floor(sec);
req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
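/* e.g. fsleep(1.5) yields tv_sec = 1 and tv_nsec = 500000000 */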
nanosleep(&req, NULL);
}
/*
* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
* if we were unable to determine its size.
*/
static int
terminal_height(void)
{
struct winsize win;
if (isatty(STDOUT_FILENO) == 0)
return (-1);
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
return (win.ws_row);
return (-1);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, (char *)"-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* print all the files and directories within directory */
while ((ent = readdir(dir)) != NULL) {
sprintf(fullpath, "%s/%s", dirpath, ent->d_name);
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(const char *subcommand)
{
char *dir, *sp, *tmp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
for (dir = strtok_r(sp, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp))
print_zpool_dir_scripts(dir);
free(sp);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 10,
* but may be as large as the column width - 42 so it still fits on one line.
* NOTE: 42 is the width of the default capacity/operations/bandwidth output
*/
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
int width, available_width;
/*
* get_namewidth() returns the maximum width of any name in that column
* for any pool/vdev/device line that will be output.
*/
- width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_vdevs.cb_name_flags,
- cb->cb_verbose);
+ width = get_namewidth(zhp, cb->cb_namewidth,
+ cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
/*
* The width we are calculating is the width of the header and also the
* padding width for names that are less than maximum width. The stats
* take up 42 characters, so the width available for names is:
*/
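/* For example, an 80-column terminal leaves 80 - 42 = 38 characters. */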
available_width = get_columns() - 42;
/*
* If the maximum width fits on a screen, then great! Make everything
* line up by justifying all lines to the same width. If that max
* width is larger than what's available, the name plus stats won't fit
* on one line, and justifying to that width would cause every line to
* wrap on the screen. We only want lines with long names to wrap.
* Limit the padding to what won't wrap.
*/
if (width > available_width)
width = available_width;
/*
* And regardless of whatever the screen width is (get_columns can
* return 0 if the width is not known or less than 42 for a narrow
* terminal) have the width be a minimum of 10.
*/
if (width < 10)
width = 10;
/* Save the calculated width */
cb->cb_namewidth = width;
return (0);
}
/*
* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
* -n Only print headers once
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
boolean_t headers_once = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'n':
headers_once = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
/* All the args are vdevs */
cb.cb_vdevs.cb_names = argv;
cb.cb_vdevs.cb_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
/* ...and the rest are vdev names */
cb.cb_vdevs.cb_names = argv + 1;
cb.cb_vdevs.cb_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdevs.cb_names_count != 0) {
/*
* If user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
&ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
* Histogram tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
* width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE,
get_namewidth_iostat, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
cb.cb_vdevs.cb_names_count,
cb.cb_vdevs.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* Check terminal size so we can print headers
* even when terminal window has its height
* changed.
*/
winheight = terminal_height();
/*
* Are we connected to a TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
/*
* If it's the first time and we're not skipping it,
* or if exactly one of skip and verbose is set, print the header.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose) ||
(!headers_once &&
(cb.cb_iteration % winheight) == 0)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdevs.cb_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
/*
* Flush the output so that redirection to a file isn't buffered
* indefinitely.
*/
(void) fflush(stdout);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) fputs(" ", stdout);
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) fputs(header, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) fputc('\n', stdout);
}
/*
* Given a pool and a list of properties, print out all the properties according
* to the described layout. Used by zpool_do_list().
*/
static void
print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
const char *propstr;
boolean_t right_justify;
size_t width;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first) {
if (cb->cb_scripted)
(void) fputc('\t', stdout);
else
(void) fputs(" ", stdout);
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), NULL, cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
} else {
propstr = "-";
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
(void) fputc('\n', stdout);
}
static void
print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
case ZPOOL_PROP_DEDUPRATIO:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
/* capacity value is in parts-per-10,000 (aka permyriad) */
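/*
 * For example, a value of 123 prints as "1.23%" and 2537 prints as
 * "25.4%"; in raw (-p) mode the integer value / 100 is printed instead.
 */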
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value / 100);
else
(void) snprintf(propval, sizeof (propval),
value < 1000 ? "%1.2f%%" : value < 10000 ?
"%2.1f%%" : "%3.0f%%", value / 100.0);
break;
case ZPOOL_PROP_HEALTH:
width = 8;
(void) strlcpy(propval, str, sizeof (propval));
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
/*
* print static default line per vdev
* not compatible with '-o' <proplist> option
*/
static void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
const char *dashes = "%-*s - - - - "
"- - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
const char *state;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) - depth), "");
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to print_one_column()
* to indicate that the value is valid.
*/
if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
scripted, B_TRUE, format);
else
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_CHECKPOINT,
vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
scripted, B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, NULL, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format);
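/*
 * Capacity is expressed in parts-per-10,000 so print_one_column() can
 * show two decimal places for small values; e.g. 25 GiB allocated out
 * of 100 GiB of space yields cap = 2500, displayed as "25.0%".
 */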
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
scripted, toplevel, format);
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
B_TRUE, format);
(void) fputc('\n', stdout);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
/* list the normal vdevs first */
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
free(vname);
}
/* list the classes: 'logs', 'dedup', and 'special' */
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
char *bias = NULL;
char *type = NULL;
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) == 0 && islog) {
bias = (char *)VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_TRUE);
free(vname);
}
}
}
/*
* Generic callback function to list a pool.
*/
static int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
print_pool(zhp, cbp);
if (cbp->cb_verbose) {
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
}
return (0);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 9,
* but may be as large as needed.
*/
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cb = data;
int width;
- width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags,
- cb->cb_verbose);
+ width = get_namewidth(zhp, cb->cb_namewidth,
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
if (width < 9)
width = 9;
cb->cb_namewidth = width;
return (0);
}
/*
* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
"capacity,dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
current_prop_type = ZFS_TYPE_POOL;
/* check options */
while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
cb.cb_namewidth = 8; /* 8 until precalc is avail */
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (!cb.cb_scripted && (first || cb.cb_verbose)) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t rebuild = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:sw")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 's':
rebuild = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* unless manually specified, use the "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
rebuild);
if (ret == 0 && wait)
ret = zpool_wait(zhp,
replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for replacing to complete before returning
*
* Replace <device> with <new_device>.
*/
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for resilvering to complete before returning
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
* <device> and <new_device>. In either case, <new_device> will begin life
* with a DTL of [0, now], and will immediately begin to resilver itself.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
* Restrictions: the top-level of the pool must only be made up of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
print_vdev_tree(NULL, "dedup", config, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", config, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
ret = 1;
(void) fprintf(stderr, gettext("Split was successful, but "
"the datasets could not all be mounted\n"));
(void) fprintf(stderr, gettext("Try doing '%s' with a "
"different altroot\n"), "zpool import");
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* zpool online <pool> <device> ...
*/
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "e")) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
}
} else {
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool offline [-ft] <pool> <device> ...
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
*/
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "ft")) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (fault) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool clear <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
*/
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
/* check options */
while ((c = getopt(argc, argv, "FnX")) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0) {
return (1);
}
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
/*
* zpool reguid <pool>
*/
int
zpool_do_reguid(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_reguid(zhp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen <pool>
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
*/
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_reopen_one, &scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
pool_checkpoint_stat_t *pcs = NULL;
uint_t c;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return (B_FALSE);
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
return (B_TRUE);
}
return (B_FALSE);
}
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
if (err == 0 && zpool_has_checkpoint(zhp) &&
cb->cb_type == POOL_SCAN_SCRUB) {
(void) printf(gettext("warning: will not scrub state that "
"belongs to the checkpoint of pool '%s'\n"),
zpool_get_name(zhp));
}
return (err != 0);
}
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
zpool_wait_activity_t *act = data;
return (zpool_wait(zhp, *act));
}
/*
* zpool scrub [-s | -p] [-w] <pool> ...
*
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pauses an in-progress scrub.
* -w Wait. Blocks until scrub has completed.
*/
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "spw")) != -1) {
switch (c) {
case 's':
cb.cb_type = POOL_SCAN_NONE;
break;
case 'p':
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (cb.cb_type == POOL_SCAN_NONE &&
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-s and -p are mutually exclusive\n"));
usage(B_FALSE);
}
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, wait_callback, &act);
}
return (error);
}
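/*
* Illustrative usage: 'zpool scrub -w tank' starts the scrub through
* scrub_callback() and then blocks in the second for_each_pool() pass,
* which calls zpool_wait(zhp, ZPOOL_WAIT_SCRUB) until the scrub finishes.
*/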
/*
* zpool resilver <pool> ...
*
* Restarts any in-progress resilver
*/
int
zpool_do_resilver(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb));
}
/*
* zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
*
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
* adding a multiplier suffix such as 'k' or 'm'.
* -s Suspend. TRIM can then be restarted with no flags.
* -w Wait. Blocks until trimming has completed.
*/
int
zpool_do_trim(int argc, char **argv)
{
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"secure", no_argument, NULL, 'd'},
{"rate", required_argument, NULL, 'r'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_trim_func_t cmd_type = POOL_TRIM_START;
uint64_t rate = 0;
boolean_t secure = B_FALSE;
boolean_t wait = B_FALSE;
int c;
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
!= -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_CANCEL;
break;
case 'd':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
secure = B_TRUE;
break;
case 'r':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-r cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
(void) fprintf(stderr, "%s: %s\n",
gettext("invalid value for rate"),
libzfs_error_description(g_zfs));
usage(B_FALSE);
}
break;
case 's':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
char *poolname = argv[0];
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
trimflags_t trim_flags = {
.secure = secure,
.rate = rate,
.wait = wait,
};
nvlist_t *vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
trim_flags.fullpool = B_TRUE;
} else {
trim_flags.fullpool = B_FALSE;
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
fnvlist_free(vdevs);
zpool_close(zhp);
return (error);
}
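/*
* Illustrative usage: 'zpool trim -d -r 500M tank' starts a secure TRIM
* capped at roughly 500 MiB/s; with no devices listed,
* zpool_collect_leaves() adds every leaf vdev and fullpool is set.
* 'zpool trim -c tank' would instead cancel any TRIM in progress.
*/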
/*
* Converts a total number of seconds to a human-readable string broken
* down into days/hours/minutes/seconds.
*/
static void
secs_to_dhms(uint64_t total, char *buf)
{
uint64_t days = total / 60 / 60 / 24;
uint64_t hours = (total / 60 / 60) % 24;
uint64_t mins = (total / 60) % 60;
uint64_t secs = (total % 60);
if (days > 0) {
(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
(u_longlong_t)days, (u_longlong_t)hours,
(u_longlong_t)mins, (u_longlong_t)secs);
} else {
(void) sprintf(buf, "%02llu:%02llu:%02llu",
(u_longlong_t)hours, (u_longlong_t)mins,
(u_longlong_t)secs);
}
}
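/*
* For example, secs_to_dhms(93784, buf) yields "1 days 02:03:04" and
* secs_to_dhms(3750, buf) yields "01:02:30".
*/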
/*
* Print out detailed scrub status.
*/
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t pass_scanned, scanned, pass_issued, issued, total;
uint64_t elapsed, scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
char srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
assert(ps->pss_func == POOL_SCAN_SCRUB ||
ps->pss_func == POOL_SCAN_RESILVER);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
secs_to_dhms(end - start, time_buf);
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub repaired %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilvered %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (ps->pss_func == POOL_SCAN_SCRUB) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total = ps->pss_to_examine;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ?
((total - issued) / issue_rate) : UINT64_MAX;
secs_to_dhms(total_secs_left, time_buf);
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total, total_buf, sizeof (total_buf));
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
/* do not print estimated time if we have a paused scrub */
if (pause == 0) {
(void) printf(gettext("\t%s scanned at %s/s, "
"%s issued at %s/s, %s total\n"),
scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
} else {
(void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
scanned_buf, issued_buf, total_buf);
}
if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
if (total_secs_left != UINT64_MAX &&
issue_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
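/*
* Rough example of the estimate above: if 6 GiB of I/O was issued in this
* pass over 600 seconds of unpaused time, issue_rate is ~10.7 MB/s; with
* 94 GiB still to issue, the remaining time is about 9400 seconds, printed
* as "02:36:40 to go". The estimate is suppressed once issue_rate falls
* below 10 MiB/s.
*/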
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name)
{
if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
return;
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
uint64_t bytes_issued = vrs->vrs_bytes_issued;
uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
uint64_t bytes_est = vrs->vrs_bytes_est;
uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
(vrs->vrs_pass_time_ms + 1)) * 1000;
uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
(vrs->vrs_pass_time_ms + 1)) * 1000;
double scan_pct = MIN((double)bytes_scanned * 100 /
(bytes_est + 1), 100);
/* Format all of the numbers we will be reporting */
char bytes_scanned_buf[7], bytes_issued_buf[7];
char bytes_rebuilt_buf[7], bytes_est_buf[7];
char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
sizeof (bytes_scanned_buf));
zfs_nicebytes(bytes_issued, bytes_issued_buf,
sizeof (bytes_issued_buf));
zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
sizeof (bytes_rebuilt_buf));
zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf));
zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf));
time_t start = vrs->vrs_start_time;
time_t end = vrs->vrs_end_time;
/* Rebuild is finished or canceled. */
if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
(void) printf(gettext("resilvered (%s) %s in %s "
"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
(void) printf(gettext("resilver (%s) canceled on %s"),
vdev_name, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
(void) printf(gettext("resilver (%s) in progress since %s"),
vdev_name, ctime(&start));
}
assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) /
MAX(scan_rate, 1), time_buf);
(void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, "
"%s total\n"), bytes_scanned_buf, scan_rate_buf,
bytes_issued_buf, issue_rate_buf, bytes_est_buf);
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
bytes_rebuilt_buf, scan_pct);
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
if (scan_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
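/*
* The per-pass rates above are derived from millisecond counters, e.g.
* 1 GiB of vrs_pass_bytes_scanned over 10,000 ms of vrs_pass_time_ms works
* out to roughly 100 MiB/s; the "+ 1" only guards against dividing by zero
* on a pass that has just started.
*/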
/*
* Print rebuild status for top-level vdevs.
*/
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
char *name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
print_rebuild_status_impl(vrs, name);
free(name);
}
}
}
/*
* As we don't scrub checkpointed blocks, we want to warn the user that we
* skipped scanning some blocks if a checkpoint exists or existed at any
* time during the scan. If a sequential (rather than healing) reconstruction
* was performed, the blocks were reconstructed; however, their checksums
* have not been verified, so we still print the warning.
*/
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
if (ps == NULL || pcs == NULL)
return;
if (pcs->pcs_state == CS_NONE ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
return;
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
if (ps->pss_state == DSS_NONE)
return;
if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
ps->pss_end_time < pcs->pcs_start_time)
return;
if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
(void) printf(gettext(" scan warning: skipped blocks "
"that are only referenced by the checkpoint.\n"));
} else {
assert(ps->pss_state == DSS_SCANNING);
(void) printf(gettext(" scan warning: skipping blocks "
"that are only referenced by the checkpoint.\n"));
}
}
/*
* Returns B_TRUE if there is an active rebuild in progress. Otherwise,
* B_FALSE is returned and 'rebuild_end_time' is set to the end time for
* the last completed (or cancelled) rebuild.
*/
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
nvlist_t **child;
uint_t children;
boolean_t rebuilding = B_FALSE;
uint64_t end_time = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
if (vrs->vrs_end_time > end_time)
end_time = vrs->vrs_end_time;
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
rebuilding = B_TRUE;
end_time = 0;
break;
}
}
}
if (rebuild_end_time != NULL)
*rebuild_end_time = end_time;
return (rebuilding);
}
/*
* Print the scan status.
*/
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
uint64_t rebuild_end_time = 0, resilver_end_time = 0;
boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
boolean_t active_resilver = B_FALSE;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *ps = NULL;
uint_t c;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
if (ps->pss_func == POOL_SCAN_RESILVER) {
resilver_end_time = ps->pss_end_time;
active_resilver = (ps->pss_state == DSS_SCANNING);
}
have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
}
boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
/* Always print the scrub status when available. */
if (have_scrub)
print_scan_scrub_resilver_status(ps);
/*
* When there is an active resilver or rebuild print its status.
* Otherwise print the status of the last resilver or rebuild.
*/
if (active_resilver || (!active_rebuild && have_resilver &&
resilver_end_time && resilver_end_time > rebuild_end_time)) {
print_scan_scrub_resilver_status(ps);
} else if (active_rebuild || (!active_resilver && have_rebuild &&
rebuild_end_time && rebuild_end_time > resilver_end_time)) {
print_rebuild_status(zhp, nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_scan_warning(ps, pcs);
}
/*
* Print out detailed removal status.
*/
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
time_t start, end;
nvlist_t *config, *nvroot;
nvlist_t **child;
uint_t children;
char *vdev_name;
if (prs == NULL || prs->prs_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(prs->prs_removing_vdev < children);
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
/*
* Removal is finished or canceled.
*/
if (prs->prs_state == DSS_FINISHED) {
uint64_t minutes_taken = (end - start) / 60;
(void) printf(gettext("Removal of vdev %llu copied %s "
"in %lluh%um, completed on %s"),
(longlong_t)prs->prs_removing_vdev,
copied_buf,
(u_longlong_t)(minutes_taken / 60),
(uint_t)(minutes_taken % 60),
ctime((time_t *)&end));
} else if (prs->prs_state == DSS_CANCELED) {
(void) printf(gettext("Removal of %s canceled on %s"),
vdev_name, ctime(&end));
} else {
uint64_t copied, total, elapsed, mins_left, hours_left;
double fraction_done;
uint_t rate;
assert(prs->prs_state == DSS_SCANNING);
/*
* Removal is in progress.
*/
(void) printf(gettext(
"Evacuation of %s in progress since %s"),
vdev_name, ctime(&start));
copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
total = prs->prs_to_copy;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - prs->prs_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
mins_left = ((total - copied) / rate) / 60;
hours_left = mins_left / 60;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
* do not print estimated time if hours_left is more than
* 30 days
*/
(void) printf(gettext(
"\t%s copied out of %s at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (hours_left < (30 * 24)) {
(void) printf(gettext(", %lluh%um to go\n"),
(u_longlong_t)hours_left, (uint_t)(mins_left % 60));
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vdev_name);
if (prs->prs_mapping_memory > 0) {
char mem_buf[7];
zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
(void) printf(gettext(
"\t%s memory used for removed device mappings\n"),
mem_buf);
}
}
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
time_t start;
char space_buf[7];
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return;
(void) printf(gettext("checkpoint: "));
start = pcs->pcs_start_time;
zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
char *date = ctime(&start);
/*
* ctime() adds a newline at the end of the generated
* string, thus the weird format specifier and the
* strlen() call used to chop it off from the output.
*/
(void) printf(gettext("created %.*s, consumes %s\n"),
(int)(strlen(date) - 1), date, space_buf);
return;
}
assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
(void) printf(gettext("discarding, %s remaining.\n"),
space_buf);
}
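/*
* Note on the %.*s trick above: ctime() returns a fixed-format string such
* as "Wed Jun 30 21:49:08 1993\n", so printing strlen(date) - 1 characters
* simply drops the trailing newline.
*/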
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
free(name);
}
}
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2,
B_FALSE, NULL);
free(name);
}
}
static void
print_dedup_stats(nvlist_t *config)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
char dspace[6], mspace[6];
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if we have anything in the dedup
* table continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
/*
* Display a summary of pool status. Displays a summary such as:
*
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config. If the '-e'
* option is specified, then we print out error rate information as well.
*/
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
const char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
printf(" ");
printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
fputc(' ', stdout);
printf_color(ANSI_BOLD, gettext("state: "));
printf_color(health_str_to_color(health), "%s", health);
fputc('\n', stdout);
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_FAILING_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
(void) printf(gettext("status: One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
(void) printf(gettext("action: Replace affected devices with "
"devices that support the\n\tconfigured block size, or "
"migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted datasets "
"contain an on-disk incompatibility\n\twhich "
"needs to be corrected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted snapshots "
"and bookmarks contain an on-disk\n\tincompat"
"ibility. This may cause on-disk corruption if "
"they are used\n\twith 'zfs recv'.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
"the encrypted snapshots and bookmarks is required,"
" use\n\ta non-raw send to backup and restore them."
" Alternately, they may be\n\tremoved to resolve "
"the incompatibility.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors can't actually be generated, yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
pool_checkpoint_stat_t *pcs = NULL;
pool_removal_stat_t *prs = NULL;
print_scan_status(zhp, nvroot);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
print_removal_status(zhp, prs);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_status(pcs);
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
color_start(ANSI_BOLD);
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
color_end();
if (cbp->cb_print_slow_ios) {
printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE, NULL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
nvlist_t *nverrlist = NULL;
/*
* If the approximate error count is small, get a
* precise count by fetching the entire log and
* uniquifying the results.
*/
if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
zpool_get_errlog(zhp, &nverrlist) == 0) {
nvpair_t *elem;
elem = NULL;
nerr = 0;
while ((elem = nvlist_next_nvpair(nverrlist,
elem)) != NULL) {
nerr++;
}
}
nvlist_free(nverrlist);
(void) printf("\n");
if (nerr == 0)
(void) printf(gettext("errors: No known data "
"errors\n"));
else if (!cbp->cb_verbose)
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
else
print_error_log(zhp);
}
if (cbp->cb_dedup_stats)
print_dedup_stats(config);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
/*
* zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -i Display vdev initialization status.
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -v Display complete error logs
* -x Display only pools with potential problems
* -D Display dedup status (undocumented)
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format
*
* Describes the health status of all pools or some subset.
*/
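/*
* Illustrative usage: 'zpool status -sv tank 10 3' prints verbose status
* for 'tank' including the SLOW column, repeating every 10 seconds for
* three iterations; the trailing interval and count are parsed by
* get_interval_count() below.
*/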
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
/* check options */
while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
case 'i':
cb.cb_print_vdev_init = B_TRUE;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 's':
cb.cb_print_slow_ios = B_TRUE;
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case 'D':
cb.cb_dedup_stats = B_TRUE;
break;
case 't':
cb.cb_print_vdev_trim = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
for (;;) {
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
cb.cb_literal, status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
(void) printf(gettext("all pools are healthy\n"));
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems(zhp, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
(void) fprintf(stderr, gettext("Upgrade not performed because "
"'compatibility' property set to '"
ZPOOL_COMPAT_LEGACY "'.\n"));
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
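/*
* For illustration: when a supported feature such as "bookmark_v2" is in
* the requested compatibility set but missing from the pool's enabled
* nvlist, the loop above builds the property name "feature@bookmark_v2"
* with asprintf() and enables it through zpool_set_prop() with
* ZFS_FEATURE_ENABLED.
*/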
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t modified_pool = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
modified_pool = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
modified_pool = B_TRUE;
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"
"Note that setting a pool's 'compatibility' "
"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
"inhibit upgrades.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "
"used to inhibit\nfeature "
"upgrades.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t modified_pool = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
modified_pool = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
modified_pool = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported and requested features enabled.\n"),
zpool_get_name(zhp));
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display pools formatted with an older version that can be upgraded.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
*/
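/*
* Illustrative invocations: plain 'zpool upgrade' lists pools that can be
* upgraded, 'zpool upgrade -v' shows supported feature flags and legacy
* versions, 'zpool upgrade -a' upgrades every imported pool, and
* 'zpool upgrade -V 28 tank' upgrades 'tank' only as far as legacy
* version 28.
*/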
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf("%s", gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
if (!fi->fi_zfs_mod_supported)
continue;
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported and "
"requested features enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported and requested features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, upgrade_one, &cb);
}
return (ret);
}
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
nvlist_t **records;
uint_t numrecords;
int i;
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[64] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
}
if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
ZPOOL_HIST_ELAPSED_NS);
(void) snprintf(tbuf + strlen(tbuf),
sizeof (tbuf) - strlen(tbuf),
" (%lldms)", (long long)elapsed_ns / 1000 / 1000);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
(void) printf(" output nvlist omitted; "
"original size: %lldKB\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_OUTPUT_SIZE) / 1024);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
}
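/*
 * Editor's note: a minimal standalone sketch (not part of this patch) of the
 * timestamp formatting used by print_history_records() above: strftime() with
 * "%F.%T", followed by an optional elapsed-time suffix in milliseconds.  The
 * sample values are arbitrary.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>

int
main(void)
{
        char tbuf[64] = "";
        time_t tsec = 1700000000;               /* arbitrary example timestamp */
        long long elapsed_ns = 2500000000LL;    /* 2.5 seconds, for illustration */
        struct tm t;

        (void) localtime_r(&tsec, &t);
        (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
        (void) snprintf(tbuf + strlen(tbuf), sizeof (tbuf) - strlen(tbuf),
            " (%lldms)", elapsed_ns / 1000 / 1000);
        (void) printf("%s\n", tbuf);    /* e.g. 2023-11-14.22:13:20 (2500ms) */
        return (0);
}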
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
int ret;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
uint64_t off = 0;
boolean_t eof = B_FALSE;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
while (!eof) {
if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
return (ret);
print_history_records(nvhis, cb);
nvlist_free(nvhis);
}
(void) printf("\n");
return (ret);
}
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, get_history_one, &cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32], *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
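/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * ctime_r() splicing done in zpool_do_events_short() above.  ctime_r() always
 * produces the fixed layout "Www Mmm dd hh:mm:ss yyyy\n", so the month/day,
 * year, and clock time can be copied from fixed offsets and a nanosecond
 * suffix appended.  The sample time is arbitrary.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>

int
main(void)
{
        char ctime_str[26], str[32];
        time_t sec = 741044948;         /* arbitrary example timestamp */
        long long nsec = 123456789;

        memset(str, ' ', sizeof (str));
        (void) ctime_r(&sec, ctime_str);
        (void) memcpy(str, ctime_str + 4, 6);           /* month and day */
        (void) memcpy(str + 7, ctime_str + 20, 4);      /* year */
        (void) memcpy(str + 12, ctime_str + 11, 8);     /* time of day */
        (void) sprintf(str + 20, ".%09lld", nsec);      /* nanoseconds */
        (void) printf("%s\n", str);
        return (0);
}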
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_DONTCARE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
static int
zpool_do_events_clear(void)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays events logged by ZFS.
*/
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear();
else
ret = zpool_do_events_next(&opts);
return (ret);
}
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
pl = pl->pl_next) {
char *prop_name;
/*
* If the first property is the pool name, it is a special
* placeholder that we can skip. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL) {
prop_name = pl->pl_user_prop;
} else {
prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
}
if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
prop_name, value, sizeof (value), &srctype,
cbp->cb_literal) == 0) {
zprop_print_one_property(vdevname, cbp, prop_name,
value, srctype, NULL, NULL);
}
}
return (0);
}
static int
get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data)
{
zpool_handle_t *zhp = zhp_data;
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
cbp->cb_vdevs.cb_name_flags);
int ret;
/* Adjust the column widths for the vdev properties */
ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
return (ret);
}
static int
get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
{
zpool_handle_t *zhp = zhp_data;
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
cbp->cb_vdevs.cb_name_flags);
int ret;
/* Display the properties */
ret = get_callback_vdev(zhp, vdevname, data);
return (ret);
}
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[MAXNAMELEN];
zprop_source_t srctype;
zprop_list_t *pl;
int vid;
if (cbp->cb_type == ZFS_TYPE_VDEV) {
if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
for_each_vdev(zhp, get_callback_vdev_width_cb, data);
for_each_vdev(zhp, get_callback_vdev_cb, data);
} else {
/* Adjust column widths for vdev properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
vdev_expand_proplist(zhp,
cbp->cb_vdevs.cb_names[vid],
&cbp->cb_proplist);
}
/* Display the properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
get_callback_vdev(zhp,
cbp->cb_vdevs.cb_names[vid], data);
}
}
} else {
assert(cbp->cb_type == ZFS_TYPE_POOL);
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also
* skip over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp,
pl->pl_user_prop, value,
sizeof (value)) == 0) {
zprop_print_one_property(
zpool_get_name(zhp), cbp,
pl->pl_user_prop, value, srctype,
NULL, NULL);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype,
cbp->cb_literal) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp),
cbp, zpool_prop_to_name(pl->pl_prop),
value, srctype, NULL, NULL);
}
}
}
return (0);
}
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *propstr = NULL;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
current_prop_type = cb.cb_type;
/* check options */
while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'o':
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] =
{ "name", "property", "value", "source",
"all" };
static const zfs_get_column_t col_cols[] =
{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
GET_COL_SOURCE };
if (i == ZFS_GET_NCOLS - 1) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
if (strcmp(tok, col_opts[c]) == 0)
goto found;
(void) fprintf(stderr,
gettext("invalid column name '%s'\n"), tok);
usage(B_FALSE);
found:
if (c >= 4) {
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
memcpy(cb.cb_columns, col_cols,
sizeof (col_cols));
i = ZFS_GET_NCOLS - 1;
} else
cb.cb_columns[i++] = col_cols[c];
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
/* Properties list is needed later by zprop_get_list() */
propstr = argv[0];
argc--;
argv++;
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
/* ... and the rest are vdev names */
cb.cb_vdevs.cb_names = argv + 1;
cb.cb_vdevs.cb_names_count = argc - 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected a list of vdevs in"
" \"%s\", but got:\n"), argv[0]);
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The first arg isn't a pool name.
*/
fprintf(stderr, gettext("missing pool name.\n"));
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
cb.cb_type) != 0) {
/* Use correct list of valid properties (pool or vdev) */
current_prop_type = cb.cb_type;
usage(B_FALSE);
}
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
cb.cb_literal, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
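/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * strsep()-based tokenizing used for the 'zpool get -o' column list above.
 * The column names come from the option table in zpool_do_get(); the input
 * string here is just an example.  strsep() modifies the string, so a
 * private copy is tokenized.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        static const char *const col_opts[] =
            { "name", "property", "value", "source", "all" };
        char *arg = strdup("name,value,source");        /* example -o argument */
        char *list = arg;

        for (char *tok; (tok = strsep(&list, ",")) != NULL; ) {
                size_t c;

                for (c = 0; c < sizeof (col_opts) / sizeof (col_opts[0]); c++)
                        if (strcmp(tok, col_opts[c]) == 0)
                                break;
                if (c == sizeof (col_opts) / sizeof (col_opts[0]))
                        (void) printf("invalid column name '%s'\n", tok);
                else
                        (void) printf("column %zu: %s\n", c, tok);
        }
        free(arg);
        return (0);
}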
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
boolean_t cb_any_successful;
} set_cbdata_t;
static int
set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
{
int error;
/* Check if we have out-of-bounds features */
if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(cb->cb_value, features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
nvlist_t *enabled = zpool_get_features(zhp);
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
if (nvlist_exists(enabled, fguid) && !features[i])
break;
}
if (i < SPA_FEATURES)
(void) fprintf(stderr, gettext("Warning: one or "
"more features already enabled on pool '%s'\n"
"are not present in this compatibility set.\n"),
zpool_get_name(zhp));
}
/* if we're setting a feature, check it's in compatibility set */
if (zpool_prop_feature(cb->cb_propname) &&
strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
char *fname = strchr(cb->cb_propname, '@') + 1;
spa_feature_t f;
if (zfeature_lookup_name(fname, &f) == 0) {
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(compat, features) !=
ZPOOL_COMPATIBILITY_OK) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"because the pool's 'compatibility' "
"property cannot be parsed.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
if (!features[f]) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"as it is not specified in this pool's "
"current compatibility set.\n"
"Consider setting 'compatibility' to a "
"less restrictive set, or to 'off'.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
}
}
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
return (error);
}
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
if (cb->cb_type == ZFS_TYPE_VDEV) {
error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
cb->cb_propname, cb->cb_value);
} else {
assert(cb->cb_type == ZFS_TYPE_POOL);
error = set_pool_callback(zhp, cb);
}
cb->cb_any_successful = !error;
return (error);
}
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
current_prop_type = ZFS_TYPE_POOL;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 4) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
argc -= 2;
argv += 2;
if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
/* Argument is a vdev */
cb.cb_vdevs.cb_names = argv;
cb.cb_vdevs.cb_names_count = 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
/* 2nd argument is a vdev */
cb.cb_vdevs.cb_names = argv + 1;
cb.cb_vdevs.cb_names_count = 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 1; /* One pool to process */
} else if (argc > 1) {
(void) fprintf(stderr,
gettext("too many pool names\n"));
usage(B_FALSE);
}
}
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, set_callback, &cb);
return (error);
}
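/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * in-place property=value split performed by zpool_do_set() above: strchr()
 * locates the '=', which is overwritten with a NUL so the same buffer yields
 * both the property name and the value.  The input string is an example.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
        char arg[] = "compression=on";  /* example property=value argument */
        char *propname = arg;
        char *value = strchr(arg, '=');

        if (value == NULL) {
                (void) fprintf(stderr, "missing value in property=value\n");
                return (1);
        }
        *value++ = '\0';
        (void) printf("property '%s', value '%s'\n", propname, value);
        return (0);
}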
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
uint64_t bytes_remaining;
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
assert(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (activity == ZPOOL_WAIT_INITIALIZE &&
vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
bytes_remaining = vs->vs_initialize_bytes_est -
vs->vs_initialize_bytes_done;
else if (activity == ZPOOL_WAIT_TRIM &&
vs->vs_trim_state == VDEV_TRIM_ACTIVE)
bytes_remaining = vs->vs_trim_bytes_est -
vs->vs_trim_bytes_done;
else
bytes_remaining = 0;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++)
bytes_remaining += vdev_activity_remaining(child[c], activity);
return (bytes_remaining);
}
/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
uint64_t bytes_remaining = 0;
nvlist_t **child;
uint_t children;
int error;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
error = nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
if (error == 0) {
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
bytes_remaining += (vrs->vrs_bytes_est -
vrs->vrs_bytes_rebuilt);
}
}
}
return (bytes_remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
char *vdev_type;
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++) {
if (vdev_any_spare_replacing(child[c]))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct wait_data {
char *wd_poolname;
boolean_t wd_scripted;
boolean_t wd_exact;
boolean_t wd_headers_once;
boolean_t wd_should_exit;
/* Which activities to wait for */
boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
float wd_interval;
pthread_cond_t wd_cv;
pthread_mutex_t wd_mutex;
} wait_data_t;
/*
* Print to stdout a single line, containing one column for each activity that
* we are waiting for, specifying how many bytes of work are left for that
* activity.
*/
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
nvlist_t *config, *nvroot;
uint_t c;
int i;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *pss = NULL;
pool_removal_stat_t *prs = NULL;
const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"};
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
/* Calculate the width of each column */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
/*
* Make sure we have enough space in the col for pretty-printed
* numbers and for the column header, and then leave a couple
* spaces between cols for readability.
*/
col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
}
/* Print header if appropriate */
int term_height = terminal_height();
boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
row % (term_height-1) == 0);
if (!wd->wd_scripted && (row == 0 || reprint_header)) {
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
if (wd->wd_enabled[i])
(void) printf("%*s", col_widths[i], headers[i]);
}
(void) fputc('\n', stdout);
}
/* Bytes of work remaining in each activity */
int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
bytes_rem[ZPOOL_WAIT_FREE] =
zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
if (prs != NULL && prs->prs_state == DSS_SCANNING)
bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
prs->prs_copied;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
if (pss != NULL && pss->pss_state == DSS_SCANNING &&
pss->pss_pass_scrub_pause == 0) {
int64_t rem = pss->pss_to_examine - pss->pss_issued;
if (pss->pss_func == POOL_SCAN_SCRUB)
bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
else
bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
} else if (check_rebuilding(nvroot, NULL)) {
bytes_rem[ZPOOL_WAIT_RESILVER] =
vdev_activity_top_remaining(nvroot);
}
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
bytes_rem[ZPOOL_WAIT_TRIM] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
/*
* A replace finishes after resilvering finishes, so the amount of work
* left for a replace is the same as for resilvering.
*
* It isn't quite correct to say that if we have any 'spare' or
* 'replacing' vdevs and a resilver is happening, then a replace is in
* progress, like we do here. When a hot spare is used, the faulted vdev
* is not removed after the hot spare is resilvered, so the parent 'spare'
* vdev is not removed either. So we could have a 'spare' vdev, but be
* resilvering for a different reason. However, we use it as a heuristic
* because we don't have access to the DTLs, which could tell us whether
* or not we have really finished resilvering a hot spare.
*/
if (vdev_any_spare_replacing(nvroot))
bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
char buf[64];
if (!wd->wd_enabled[i])
continue;
if (wd->wd_exact)
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
bytes_rem[i]);
else
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
if (wd->wd_scripted)
(void) printf(i == 0 ? "%s" : "\t%s", buf);
else
(void) printf(" %*s", col_widths[i] - 1, buf);
}
(void) printf("\n");
(void) fflush(stdout);
}
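/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * column layout used by print_wait_status_row() above: each column is as wide
 * as its header (with a floor of 6 characters for pretty-printed numbers)
 * plus two spaces of padding, and values are right-aligned with "%*s".  The
 * headers and values below are examples.
 */
#include <stdio.h>
#include <string.h>

#define EX_MAX(a, b)    ((a) > (b) ? (a) : (b))

int
main(void)
{
        const char *const headers[] = { "SCRUB", "TRIM", "INITIALIZE" };
        const char *const values[] = { "1.2G", "0", "15.5G" };
        int widths[3];
        int i;

        for (i = 0; i < 3; i++)
                widths[i] = EX_MAX((int)strlen(headers[i]), 6) + 2;
        for (i = 0; i < 3; i++)
                (void) printf("%*s", widths[i], headers[i]);
        (void) printf("\n");
        for (i = 0; i < 3; i++)
                (void) printf("%*s", widths[i], values[i]);
        (void) printf("\n");
        return (0);
}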
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
int ret = 0;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
(void) fprintf(stderr, gettext("pthread_cond_timedwait "
"failed: %s\n"), strerror(ret));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
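/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * fractional-interval arithmetic in wait_status_thread() above: the whole
 * seconds are added to tv_sec, the fractional part is converted to
 * nanoseconds, and any overflow past one second is carried into tv_sec.
 * The interval value is an example.  Link with -lm for floor().
 */
#include <stdio.h>
#include <math.h>
#include <time.h>

#define EX_NANOSEC      1000000000LL

int
main(void)
{
        float interval = 2.75f;         /* example wait interval in seconds */
        struct timespec timeout;

        (void) clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += (time_t)floor(interval);
        long long nanos = timeout.tv_nsec +
            (long long)((interval - floor(interval)) * EX_NANOSEC);
        if (nanos >= EX_NANOSEC) {
                timeout.tv_sec++;
                timeout.tv_nsec = nanos - EX_NANOSEC;
        } else {
                timeout.tv_nsec = nanos;
        }
        (void) printf("deadline: %lld.%09ld\n",
            (long long)timeout.tv_sec, (long)timeout.tv_nsec);
        return (0);
}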
int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
int c, i;
unsigned long count;
pthread_t status_thr;
int error = 0;
zpool_handle_t *zhp;
wait_data_t wd;
wd.wd_scripted = B_FALSE;
wd.wd_exact = B_FALSE;
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
pthread_mutex_init(&wd.wd_mutex, NULL);
pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
wd.wd_enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
switch (c) {
case 'H':
wd.wd_scripted = B_TRUE;
break;
case 'n':
wd.wd_headers_once = B_TRUE;
break;
case 'p':
wd.wd_exact = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 't':
/* Reset activities array */
memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] = {
"discard", "free", "initialize", "replace",
"remove", "resilver", "scrub", "trim" };
for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
if (strcmp(tok, col_opts[i]) == 0) {
wd.wd_enabled[i] = B_TRUE;
goto found;
}
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"), tok);
usage(B_FALSE);
found:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &wd.wd_interval, &count);
if (count != 0) {
/* This subcmd only accepts an interval, not a count */
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (wd.wd_interval != 0)
verbose = B_TRUE;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
wd.wd_poolname = argv[0];
if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
return (1);
if (verbose) {
/*
* We use a separate thread for printing status updates because
* the main thread will call lzc_wait(), which blocks as long
* as an activity is in progress, which can be a long time.
*/
if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
!= 0) {
(void) fprintf(stderr, gettext("failed to create status"
"thread: %s\n"), strerror(errno));
zpool_close(zhp);
return (1);
}
}
/*
* Loop over all activities that we are supposed to wait for until none
* of them are in progress. Note that this means we can end up waiting
* for more activities to complete than just those that were in progress
* when we began waiting; if an activity we are interested in begins
* while we are waiting for another activity, we will wait for both to
* complete before exiting.
*/
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!wd.wd_enabled[i])
continue;
error = zpool_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zpool_close(zhp);
if (verbose) {
uintptr_t status;
pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
pthread_cond_signal(&wd.wd_cv);
pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
pthread_mutex_destroy(&wd.wd_mutex);
pthread_cond_destroy(&wd.wd_cv);
return (error);
}
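/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * shutdown handshake between zpool_do_wait() and wait_status_thread() above:
 * the worker sleeps in pthread_cond_timedwait() under a mutex-protected
 * "should exit" flag, and the main thread sets the flag, signals the
 * condition variable, and joins the worker.  Names here are illustrative.
 * Compile with -pthread.
 */
#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t ex_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ex_cv = PTHREAD_COND_INITIALIZER;
static int ex_should_exit;

static void *
ex_worker(void *arg)
{
        (void) arg;
        pthread_mutex_lock(&ex_mutex);
        while (!ex_should_exit) {
                struct timespec timeout;

                (void) clock_gettime(CLOCK_REALTIME, &timeout);
                timeout.tv_sec += 1;    /* periodic wakeup, like the status rows */
                (void) pthread_cond_timedwait(&ex_cv, &ex_mutex, &timeout);
                (void) printf("worker tick\n");
        }
        pthread_mutex_unlock(&ex_mutex);
        return (NULL);
}

int
main(void)
{
        pthread_t thr;

        (void) pthread_create(&thr, NULL, ex_worker, NULL);
        sleep(2);       /* stand-in for the blocking lzc_wait() loop */
        pthread_mutex_lock(&ex_mutex);
        ex_should_exit = 1;
        pthread_cond_signal(&ex_cv);
        pthread_mutex_unlock(&ex_mutex);
        (void) pthread_join(thr, NULL);
        return (0);
}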
static int
find_command_idx(const char *command, int *idx)
{
for (int i = 0; i < NCOMMAND; ++i) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
/*
* Display version message
*/
static int
zpool_do_version(int argc, char **argv)
{
(void) argc, (void) argv;
return (zfs_version_print() != 0);
}
/*
* Do zpool_load_compat() and print error message on failure
*/
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
char report[1024];
zpool_compat_status_t ret;
ret = zpool_load_compat(compat, list, report, 1024);
switch (ret) {
case ZPOOL_COMPATIBILITY_OK:
break;
case ZPOOL_COMPATIBILITY_NOFILES:
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
(void) fprintf(stderr, "Error: %s\n", report);
break;
case ZPOOL_COMPATIBILITY_WARNTOKEN:
(void) fprintf(stderr, "Warning: %s\n", report);
ret = ZPOOL_COMPATIBILITY_OK;
break;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zpool_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
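/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * argv deep copy done in main() above.  Commands that tokenize their
 * arguments in place (e.g. with strsep()) get a private, NULL-terminated
 * copy, leaving the original argv intact for history logging.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(int argc, char **argv)
{
        char **newargv;
        int i;

        newargv = malloc((argc + 1) * sizeof (newargv[0]));
        if (newargv == NULL)
                return (1);
        for (i = 0; i < argc; i++)
                newargv[i] = strdup(argv[i]);
        newargv[argc] = NULL;

        /* ... a command would parse (and possibly modify) newargv here ... */
        for (i = 0; i < argc; i++)
                (void) printf("arg %d: %s\n", i, newargv[i]);

        for (i = 0; i < argc; i++)
                free(newargv[i]);
        free(newargv);
        return (0);
}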
diff --git a/sys/contrib/openzfs/cmd/ztest.c b/sys/contrib/openzfs/cmd/ztest.c
index 31b9990a1fcf..0712f286bf66 100644
--- a/sys/contrib/openzfs/cmd/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest.c
@@ -1,8287 +1,8288 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
*/
/*
* The objective of this program is to provide a DMU/ZAP/SPA stress test
* that runs entirely in userland, is easy to use, and easy to extend.
*
* The overall design of the ztest program is as follows:
*
* (1) For each major functional area (e.g. adding vdevs to a pool,
* creating and destroying datasets, reading and writing objects, etc)
* we have a simple routine to test that functionality. These
* individual routines do not have to do anything "stressful".
*
* (2) We turn these simple functionality tests into a stress test by
* running them all in parallel, with as many threads as desired,
* and spread across as many datasets, objects, and vdevs as desired.
*
* (3) While all this is happening, we inject faults into the pool to
* verify that self-healing data really works.
*
* (4) Every time we open a dataset, we change its checksum and compression
* functions. Thus even individual objects vary from block to block
* in which checksum they use and whether they're compressed.
*
* (5) To verify that we never lose on-disk consistency after a crash,
* we run the entire test in a child of the main process.
* At random times, the child self-immolates with a SIGKILL.
* This is the software equivalent of pulling the power cord.
* The parent then runs the test again, using the existing
* storage pool, as many times as desired. If backwards compatibility
* testing is enabled ztest will sometimes run the "older" version
* of ztest after a SIGKILL.
*
* (6) To verify that we don't have future leaks or temporal incursions,
* many of the functional tests record the transaction group number
* as part of their data. When reading old data, they verify that
* the transaction group number is less than the current, open txg.
* If you add a new test, please do this if applicable.
*
* (7) Threads are created with a reduced stack size, for sanity checking.
* Therefore, it's important not to allocate huge buffers on the stack.
*
* When run with no arguments, ztest runs for about five minutes and
* produces no output if successful. To get a little bit of information,
* specify -V. To get more information, specify -VV, and so on.
*
* To turn this into an overnight stress test, use -T to specify run time.
*
* You can ask for more vdevs [-v], datasets [-d], or threads [-t]
* to increase the pool capacity, fanout, and overall stress level.
*
* Use the -k option to set the desired frequency of kills.
*
* When ztest invokes itself it passes all relevant information through a
* temporary file which is mmap-ed in the child process. This allows shared
* memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
* stored at offset 0 of this file and contains information on the size and
* number of shared structures in the file. The information stored in this file
* must remain backwards compatible with older versions of ztest so that
* ztest can invoke them during backwards compatibility testing (-B).
*/
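/*
 * Editor's note: a hedged standalone sketch (not part of this patch) of the
 * shared-memory mechanism the comment above describes: state is written into
 * a temporary file mapped MAP_SHARED, so a child process sees the same bytes
 * through the file-backed mapping.  The struct and file name are
 * illustrative, not the actual ztest layout.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/wait.h>

typedef struct ex_shared {
        unsigned long ex_generation;
} ex_shared_t;

int
main(void)
{
        char path[] = "/tmp/ex_shared.XXXXXX";
        int fd = mkstemp(path);
        ex_shared_t *shared;

        if (fd == -1 || ftruncate(fd, sizeof (*shared)) != 0)
                return (1);
        shared = mmap(NULL, sizeof (*shared), PROT_READ | PROT_WRITE,
            MAP_SHARED, fd, 0);
        if (shared == MAP_FAILED)
                return (1);

        shared->ex_generation = 1;      /* visible to anyone mapping the file */

        if (fork() == 0) {
                /* child: same file-backed mapping, so it sees the update */
                (void) printf("child sees generation %lu\n",
                    shared->ex_generation);
                _exit(0);
        }
        (void) wait(NULL);
        (void) munmap(shared, sizeof (*shared));
        (void) close(fd);
        (void) unlink(path);
        return (0);
}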
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_trim.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <sys/blake3.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <signal.h>
#include <umem.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <zfs_fletcher.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <sys/crypto/icp.h>
#if (__GLIBC__ && !__UCLIBC__)
#include <execinfo.h> /* for backtrace() */
#endif
static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;
typedef struct ztest_shared_hdr {
uint64_t zh_hdr_size;
uint64_t zh_opts_size;
uint64_t zh_size;
uint64_t zh_stats_size;
uint64_t zh_stats_count;
uint64_t zh_ds_size;
uint64_t zh_ds_count;
} ztest_shared_hdr_t;
static ztest_shared_hdr_t *ztest_shared_hdr;
enum ztest_class_state {
ZTEST_VDEV_CLASS_OFF,
ZTEST_VDEV_CLASS_ON,
ZTEST_VDEV_CLASS_RND
};
#define ZO_GVARS_MAX_ARGLEN ((size_t)64)
#define ZO_GVARS_MAX_COUNT ((size_t)10)
typedef struct ztest_shared_opts {
char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
char zo_alt_ztest[MAXNAMELEN];
char zo_alt_libpath[MAXNAMELEN];
uint64_t zo_vdevs;
uint64_t zo_vdevtime;
size_t zo_vdev_size;
int zo_ashift;
int zo_mirrors;
int zo_raid_children;
int zo_raid_parity;
char zo_raid_type[8];
int zo_draid_data;
int zo_draid_spares;
int zo_datasets;
int zo_threads;
uint64_t zo_passtime;
uint64_t zo_killrate;
int zo_verbose;
int zo_init;
uint64_t zo_time;
uint64_t zo_maxloops;
uint64_t zo_metaslab_force_ganging;
int zo_mmp_test;
int zo_special_vdevs;
int zo_dump_dbgmsg;
int zo_gvars_count;
char zo_gvars[ZO_GVARS_MAX_COUNT][ZO_GVARS_MAX_ARGLEN];
} ztest_shared_opts_t;
/* Default values for command line options. */
#define DEFAULT_POOL "ztest"
#define DEFAULT_VDEV_DIR "/tmp"
#define DEFAULT_VDEV_COUNT 5
#define DEFAULT_VDEV_SIZE (SPA_MINDEVSIZE * 4) /* 256m default size */
#define DEFAULT_VDEV_SIZE_STR "256M"
#define DEFAULT_ASHIFT SPA_MINBLOCKSHIFT
#define DEFAULT_MIRRORS 2
#define DEFAULT_RAID_CHILDREN 4
#define DEFAULT_RAID_PARITY 1
#define DEFAULT_DRAID_DATA 4
#define DEFAULT_DRAID_SPARES 1
#define DEFAULT_DATASETS_COUNT 7
#define DEFAULT_THREADS 23
#define DEFAULT_RUN_TIME 300 /* 300 seconds */
#define DEFAULT_RUN_TIME_STR "300 sec"
#define DEFAULT_PASS_TIME 60 /* 60 seconds */
#define DEFAULT_PASS_TIME_STR "60 sec"
#define DEFAULT_KILL_RATE 70 /* 70% kill rate */
#define DEFAULT_KILLRATE_STR "70%"
#define DEFAULT_INITS 1
#define DEFAULT_MAX_LOOPS 50 /* 5 minutes */
#define DEFAULT_FORCE_GANGING (64 << 10)
#define DEFAULT_FORCE_GANGING_STR "64K"
/* Simplifying assumption: -1 is not a valid default. */
#define NO_DEFAULT -1
static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_pool = DEFAULT_POOL,
.zo_dir = DEFAULT_VDEV_DIR,
.zo_alt_ztest = { '\0' },
.zo_alt_libpath = { '\0' },
.zo_vdevs = DEFAULT_VDEV_COUNT,
.zo_ashift = DEFAULT_ASHIFT,
.zo_mirrors = DEFAULT_MIRRORS,
.zo_raid_children = DEFAULT_RAID_CHILDREN,
.zo_raid_parity = DEFAULT_RAID_PARITY,
.zo_raid_type = VDEV_TYPE_RAIDZ,
.zo_vdev_size = DEFAULT_VDEV_SIZE,
.zo_draid_data = DEFAULT_DRAID_DATA, /* data drives */
.zo_draid_spares = DEFAULT_DRAID_SPARES, /* distributed spares */
.zo_datasets = DEFAULT_DATASETS_COUNT,
.zo_threads = DEFAULT_THREADS,
.zo_passtime = DEFAULT_PASS_TIME,
.zo_killrate = DEFAULT_KILL_RATE,
.zo_verbose = 0,
.zo_mmp_test = 0,
.zo_init = DEFAULT_INITS,
.zo_time = DEFAULT_RUN_TIME,
.zo_maxloops = DEFAULT_MAX_LOOPS, /* max loops during spa_freeze() */
.zo_metaslab_force_ganging = DEFAULT_FORCE_GANGING,
.zo_special_vdevs = ZTEST_VDEV_CLASS_RND,
.zo_gvars_count = 0,
};
extern uint64_t metaslab_force_ganging;
extern uint64_t metaslab_df_alloc_threshold;
extern unsigned long zfs_deadman_synctime_ms;
extern int metaslab_preload_limit;
extern int zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern int dmu_object_alloc_chunk_shift;
extern boolean_t zfs_force_some_double_word_sm_entries;
extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
static const char *const ztest_wkeydata = "abcdefghijklmnopqrstuvwxyz012345";
typedef struct ztest_shared_ds {
uint64_t zd_seq;
} ztest_shared_ds_t;
static ztest_shared_ds_t *ztest_shared_ds;
#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
#define BT_MAGIC 0x123456789abcdefULL
#define MAXFAULTS(zs) \
(MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raid_parity + 1) - 1)
enum ztest_io_type {
ZTEST_IO_WRITE_TAG,
ZTEST_IO_WRITE_PATTERN,
ZTEST_IO_WRITE_ZEROES,
ZTEST_IO_TRUNCATE,
ZTEST_IO_SETATTR,
ZTEST_IO_REWRITE,
ZTEST_IO_TYPES
};
typedef struct ztest_block_tag {
uint64_t bt_magic;
uint64_t bt_objset;
uint64_t bt_object;
uint64_t bt_dnodesize;
uint64_t bt_offset;
uint64_t bt_gen;
uint64_t bt_txg;
uint64_t bt_crtxg;
} ztest_block_tag_t;
typedef struct bufwad {
uint64_t bw_index;
uint64_t bw_txg;
uint64_t bw_data;
} bufwad_t;
/*
* It would be better to use a rangelock_t per object. Unfortunately
* the rangelock_t is not a drop-in replacement for rl_t, because we
* still need to map from object ID to rangelock_t.
*/
typedef enum {
RL_READER,
RL_WRITER,
RL_APPEND
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
kmutex_t rll_lock;
kcondvar_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t rl_object;
uint64_t rl_offset;
uint64_t rl_size;
rll_t *rl_lock;
} rl_t;
#define ZTEST_RANGE_LOCKS 64
#define ZTEST_OBJECT_LOCKS 64
/*
* Object descriptor. Used as a template for object lookup/create/remove.
*/
typedef struct ztest_od {
uint64_t od_dir;
uint64_t od_object;
dmu_object_type_t od_type;
dmu_object_type_t od_crtype;
uint64_t od_blocksize;
uint64_t od_crblocksize;
uint64_t od_crdnodesize;
uint64_t od_gen;
uint64_t od_crgen;
char od_name[ZFS_MAX_DATASET_NAME_LEN];
} ztest_od_t;
/*
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
pthread_rwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
* Per-iteration state.
*/
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
typedef struct ztest_info {
ztest_func_t *zi_func; /* test function */
uint64_t zi_iters; /* iterations per execution */
uint64_t *zi_interval; /* execute every <interval> seconds */
const char *zi_funcname; /* name of test function */
} ztest_info_t;
typedef struct ztest_shared_callstate {
uint64_t zc_count; /* per-pass count */
uint64_t zc_time; /* per-pass time */
uint64_t zc_next; /* next time to call this function */
} ztest_shared_callstate_t;
static ztest_shared_callstate_t *ztest_shared_callstate;
#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_object_next_chunk;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_mmp_enable_disable;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_class_add;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
ztest_func_t ztest_device_removal;
ztest_func_t ztest_spa_checkpoint_create_discard;
ztest_func_t ztest_initialize;
ztest_func_t ztest_trim;
ztest_func_t ztest_blake3;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;
uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
#define ZTI_INIT(func, iters, interval) \
{ .zi_func = (func), \
.zi_iters = (iters), \
.zi_interval = (interval), \
.zi_funcname = # func }
ztest_info_t ztest_info[] = {
ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always),
ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always),
ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always),
ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always),
ZTI_INIT(ztest_zap, 30, &zopt_always),
ZTI_INIT(ztest_zap_parallel, 100, &zopt_always),
ZTI_INIT(ztest_split_pool, 1, &zopt_always),
ZTI_INIT(ztest_zil_commit, 1, &zopt_incessant),
ZTI_INIT(ztest_zil_remount, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_read_write_zcopy, 1, &zopt_often),
ZTI_INIT(ztest_dmu_objset_create_destroy, 1, &zopt_often),
ZTI_INIT(ztest_dsl_prop_get_set, 1, &zopt_often),
ZTI_INIT(ztest_spa_prop_get_set, 1, &zopt_sometimes),
#if 0
ZTI_INIT(ztest_dmu_prealloc, 1, &zopt_sometimes),
#endif
ZTI_INIT(ztest_fzap, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_fault_inject, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_hold, 1, &zopt_sometimes),
ZTI_INIT(ztest_mmp_enable_disable, 1, &zopt_sometimes),
ZTI_INIT(ztest_reguid, 1, &zopt_rarely),
ZTI_INIT(ztest_scrub, 1, &zopt_rarely),
ZTI_INIT(ztest_spa_upgrade, 1, &zopt_rarely),
ZTI_INIT(ztest_dsl_dataset_promote_busy, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_attach_detach, 1, &zopt_sometimes),
ZTI_INIT(ztest_vdev_LUN_growth, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_class_add, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_device_removal, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_checkpoint_create_discard, 1, &zopt_rarely),
ZTI_INIT(ztest_initialize, 1, &zopt_sometimes),
ZTI_INIT(ztest_trim, 1, &zopt_sometimes),
ZTI_INIT(ztest_blake3, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
};
#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
kmutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
boolean_t zs_do_init;
hrtime_t zs_proc_start;
hrtime_t zs_proc_stop;
hrtime_t zs_thread_start;
hrtime_t zs_thread_stop;
hrtime_t zs_thread_kill;
uint64_t zs_enospc_count;
uint64_t zs_vdev_next_leaf;
uint64_t zs_vdev_aux;
uint64_t zs_alloc;
uint64_t zs_space;
uint64_t zs_splits;
uint64_t zs_mirrors;
uint64_t zs_metaslab_sz;
uint64_t zs_metaslab_df_alloc_threshold;
uint64_t zs_guid;
} ztest_shared_t;
#define ID_PARALLEL -1ULL
static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
static kmutex_t ztest_vdev_lock;
static boolean_t ztest_device_removal_active = B_FALSE;
static boolean_t ztest_pool_scrubbed = B_FALSE;
static kmutex_t ztest_checkpoint_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
static pthread_rwlock_t ztest_name_lock;
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;
/* Global commit callback list */
static ztest_cb_list_t zcl;
/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;
/*
* Minimum number of commit callbacks that need to be registered for us to check
* whether the minimum txg delay is acceptable.
*/
#define ZTEST_COMMIT_CB_MIN_REG 100
/*
* If a number of txgs equal to this threshold have been created after a commit
* callback has been registered but not called, then we assume there is an
* implementation bug.
*/
#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
enum ztest_object {
ZTEST_META_DNODE = 0,
ZTEST_DIROBJ,
ZTEST_OBJECTS
};
static __attribute__((noreturn)) void usage(boolean_t requested);
static int ztest_scrub_impl(spa_t *spa);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
dump_debug_buffer(void)
{
ssize_t ret __attribute__((unused));
if (!ztest_opts.zo_dump_dbgmsg)
return;
/*
* We use write() instead of printf() so that this function
* is safe to call from a signal handler.
*/
ret = write(STDOUT_FILENO, "\n", 1);
zfs_dbgmsg_print("ztest");
}
#define BACKTRACE_SZ 100
static void sig_handler(int signo)
{
struct sigaction action;
#if (__GLIBC__ && !__UCLIBC__) /* backtrace() is a GNU extension */
int nptrs;
void *buffer[BACKTRACE_SZ];
nptrs = backtrace(buffer, BACKTRACE_SZ);
backtrace_symbols_fd(buffer, nptrs, STDERR_FILENO);
#endif
dump_debug_buffer();
/*
* Restore default action and re-raise signal so SIGSEGV and
* SIGABRT can trigger a core dump.
*/
action.sa_handler = SIG_DFL;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
(void) sigaction(signo, &action, NULL);
raise(signo);
}
#define FATAL_MSG_SZ 1024
static const char *fatal_msg;
static __attribute__((format(printf, 2, 3))) __attribute__((noreturn)) void
fatal(int do_perror, const char *message, ...)
{
va_list args;
int save_errno = errno;
char *buf;
(void) fflush(stdout);
buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
if (buf == NULL)
goto out;
va_start(args, message);
(void) sprintf(buf, "ztest: ");
/* LINTED */
(void) vsprintf(buf + strlen(buf), message, args);
va_end(args);
if (do_perror) {
(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
": %s", strerror(save_errno));
}
(void) fprintf(stderr, "%s\n", buf);
fatal_msg = buf; /* to ease debugging */
out:
if (ztest_dump_core)
abort();
else
dump_debug_buffer();
exit(3);
}
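/*
 * Convert a size suffix character (B, K, M, G, T, P, E, Z) into the
 * corresponding power-of-two shift: 'B' -> 0, 'K' -> 10, 'M' -> 20, etc.
 * An invalid suffix prints an error and exits via usage().
 */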
static int
str2shift(const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
buf);
usage(B_FALSE);
}
if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
return (10*i);
}
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
usage(B_FALSE);
}
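/*
 * Parse a human-readable number such as "64M" or "1.5G" into a uint64_t,
 * applying any size suffix via str2shift().  Malformed or out-of-range
 * values print an error and exit via usage().
 */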
static uint64_t
nicenumtoull(const char *buf)
{
char *end;
uint64_t val;
val = strtoull(buf, &end, 0);
if (end == buf) {
(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
usage(B_FALSE);
} else if (end[0] == '.') {
double fval = strtod(buf, &end);
fval *= pow(2, str2shift(end));
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val = (uint64_t)fval;
} else {
int shift = str2shift(end);
if (shift >= 64 || (val << shift) >> shift != val) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val <<= shift;
}
return (val);
}
typedef struct ztest_option {
const char short_opt;
const char *long_opt;
const char *long_opt_param;
const char *comment;
unsigned int default_int;
const char *default_str;
} ztest_option_t;
/*
* The following option_table is used for generating the usage info as well as
* the long and short option information for calling getopt_long().
*/
static ztest_option_t option_table[] = {
{ 'v', "vdevs", "INTEGER", "Number of vdevs", DEFAULT_VDEV_COUNT,
NULL},
{ 's', "vdev-size", "INTEGER", "Size of each vdev",
NO_DEFAULT, DEFAULT_VDEV_SIZE_STR},
{ 'a', "alignment-shift", "INTEGER",
"Alignment shift; use 0 for random", DEFAULT_ASHIFT, NULL},
{ 'm', "mirror-copies", "INTEGER", "Number of mirror copies",
DEFAULT_MIRRORS, NULL},
{ 'r', "raid-disks", "INTEGER", "Number of raidz/draid disks",
DEFAULT_RAID_CHILDREN, NULL},
{ 'R', "raid-parity", "INTEGER", "Raid parity",
DEFAULT_RAID_PARITY, NULL},
{ 'K', "raid-kind", "raidz|draid|random", "Raid kind",
NO_DEFAULT, "random"},
{ 'D', "draid-data", "INTEGER", "Number of draid data drives",
DEFAULT_DRAID_DATA, NULL},
{ 'S', "draid-spares", "INTEGER", "Number of draid spares",
DEFAULT_DRAID_SPARES, NULL},
{ 'd', "datasets", "INTEGER", "Number of datasets",
DEFAULT_DATASETS_COUNT, NULL},
{ 't', "threads", "INTEGER", "Number of ztest threads",
DEFAULT_THREADS, NULL},
{ 'g', "gang-block-threshold", "INTEGER",
"Metaslab gang block threshold",
NO_DEFAULT, DEFAULT_FORCE_GANGING_STR},
{ 'i', "init-count", "INTEGER", "Number of times to initialize pool",
DEFAULT_INITS, NULL},
{ 'k', "kill-percentage", "INTEGER", "Kill percentage",
NO_DEFAULT, DEFAULT_KILLRATE_STR},
{ 'p', "pool-name", "STRING", "Pool name",
NO_DEFAULT, DEFAULT_POOL},
{ 'f', "vdev-file-directory", "PATH", "File directory for vdev files",
NO_DEFAULT, DEFAULT_VDEV_DIR},
{ 'M', "multi-host", NULL,
"Multi-host; simulate pool imported on remote host",
NO_DEFAULT, NULL},
{ 'E', "use-existing-pool", NULL,
"Use existing pool instead of creating new one", NO_DEFAULT, NULL},
{ 'T', "run-time", "INTEGER", "Total run time",
NO_DEFAULT, DEFAULT_RUN_TIME_STR},
{ 'P', "pass-time", "INTEGER", "Time per pass",
NO_DEFAULT, DEFAULT_PASS_TIME_STR},
{ 'F', "freeze-loops", "INTEGER", "Max loops in spa_freeze()",
DEFAULT_MAX_LOOPS, NULL},
{ 'B', "alt-ztest", "PATH", "Alternate ztest path",
NO_DEFAULT, NULL},
{ 'C', "vdev-class-state", "on|off|random", "vdev class state",
NO_DEFAULT, "random"},
{ 'o', "option", "\"OPTION=INTEGER\"",
"Set global variable to an unsigned 32-bit integer value",
NO_DEFAULT, NULL},
{ 'G', "dump-debug-msg", NULL,
"Dump zfs_dbgmsg buffer before exiting due to an error",
NO_DEFAULT, NULL},
{ 'V', "verbose", NULL,
"Verbose (use multiple times for ever more verbosity)",
NO_DEFAULT, NULL},
{ 'h', "help", NULL, "Show this help",
NO_DEFAULT, NULL},
{0, 0, 0, 0, 0, 0}
};
static struct option *long_opts = NULL;
static char *short_opts = NULL;
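/*
 * Build the long_opts and short_opts tables for getopt_long() from
 * option_table.  Options that take a parameter get a trailing ':' in
 * the short option string.
 */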
static void
init_options(void)
{
ASSERT3P(long_opts, ==, NULL);
ASSERT3P(short_opts, ==, NULL);
int count = sizeof (option_table) / sizeof (option_table[0]);
long_opts = umem_alloc(sizeof (struct option) * count, UMEM_NOFAIL);
short_opts = umem_alloc(sizeof (char) * 2 * count, UMEM_NOFAIL);
int short_opt_index = 0;
for (int i = 0; i < count; i++) {
long_opts[i].val = option_table[i].short_opt;
long_opts[i].name = option_table[i].long_opt;
long_opts[i].has_arg = option_table[i].long_opt_param != NULL
? required_argument : no_argument;
long_opts[i].flag = NULL;
short_opts[short_opt_index++] = option_table[i].short_opt;
if (option_table[i].long_opt_param != NULL) {
short_opts[short_opt_index++] = ':';
}
}
}
static void
fini_options(void)
{
int count = sizeof (option_table) / sizeof (option_table[0]);
umem_free(long_opts, sizeof (struct option) * count);
umem_free(short_opts, sizeof (char) * 2 * count);
long_opts = NULL;
short_opts = NULL;
}
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
char option[80];
FILE *fp = requested ? stdout : stderr;
(void) fprintf(fp, "Usage: %s [OPTIONS...]\n", DEFAULT_POOL);
for (int i = 0; option_table[i].short_opt != 0; i++) {
if (option_table[i].long_opt_param != NULL) {
(void) sprintf(option, " -%c --%s=%s",
option_table[i].short_opt,
option_table[i].long_opt,
option_table[i].long_opt_param);
} else {
(void) sprintf(option, " -%c --%s",
option_table[i].short_opt,
option_table[i].long_opt);
}
(void) fprintf(fp, " %-40s%s", option,
option_table[i].comment);
if (option_table[i].long_opt_param != NULL) {
if (option_table[i].default_str != NULL) {
(void) fprintf(fp, " (default: %s)",
option_table[i].default_str);
} else if (option_table[i].default_int != NO_DEFAULT) {
(void) fprintf(fp, " (default: %u)",
option_table[i].default_int);
}
}
(void) fprintf(fp, "\n");
}
exit(requested ? 0 : 1);
}
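/*
 * Return a random number in the range [0, range), using bytes read
 * from /dev/urandom (ztest_fd_rand).  A range of 0 always returns 0.
 */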
static uint64_t
ztest_random(uint64_t range)
{
uint64_t r;
ASSERT3S(ztest_fd_rand, >=, 0);
if (range == 0)
return (0);
if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
fatal(B_TRUE, "short read from /dev/urandom");
return (r % range);
}
static void
ztest_parse_name_value(const char *input, ztest_shared_opts_t *zo)
{
char name[32];
char *value;
int state = ZTEST_VDEV_CLASS_RND;
(void) strlcpy(name, input, sizeof (name));
value = strchr(name, '=');
if (value == NULL) {
(void) fprintf(stderr, "missing value in property=value "
"'-C' argument (%s)\n", input);
usage(B_FALSE);
}
*(value) = '\0';
value++;
if (strcmp(value, "on") == 0) {
state = ZTEST_VDEV_CLASS_ON;
} else if (strcmp(value, "off") == 0) {
state = ZTEST_VDEV_CLASS_OFF;
} else if (strcmp(value, "random") == 0) {
state = ZTEST_VDEV_CLASS_RND;
} else {
(void) fprintf(stderr, "invalid property value '%s'\n", value);
usage(B_FALSE);
}
if (strcmp(name, "special") == 0) {
zo->zo_special_vdevs = state;
} else {
(void) fprintf(stderr, "invalid property name '%s'\n", name);
usage(B_FALSE);
}
if (zo->zo_verbose >= 3)
(void) printf("%s vdev state is '%s'\n", name, value);
}
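/*
 * Parse the command line into ztest_opts, starting from the compiled-in
 * defaults.  Also resolves the raid kind, derives zo_vdevtime, and
 * validates any alternate ztest path given with -B.
 */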
static void
process_options(int argc, char **argv)
{
char *path;
ztest_shared_opts_t *zo = &ztest_opts;
int opt;
uint64_t value;
const char *raid_kind = "random";
memcpy(zo, &ztest_opts_defaults, sizeof (*zo));
init_options();
while ((opt = getopt_long(argc, argv, short_opts, long_opts,
NULL)) != EOF) {
value = 0;
switch (opt) {
case 'v':
case 's':
case 'a':
case 'm':
case 'r':
case 'R':
case 'D':
case 'S':
case 'd':
case 't':
case 'g':
case 'i':
case 'k':
case 'T':
case 'P':
case 'F':
value = nicenumtoull(optarg);
}
switch (opt) {
case 'v':
zo->zo_vdevs = value;
break;
case 's':
zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
break;
case 'a':
zo->zo_ashift = value;
break;
case 'm':
zo->zo_mirrors = value;
break;
case 'r':
zo->zo_raid_children = MAX(1, value);
break;
case 'R':
zo->zo_raid_parity = MIN(MAX(value, 1), 3);
break;
case 'K':
raid_kind = optarg;
break;
case 'D':
zo->zo_draid_data = MAX(1, value);
break;
case 'S':
zo->zo_draid_spares = MAX(1, value);
break;
case 'd':
zo->zo_datasets = MAX(1, value);
break;
case 't':
zo->zo_threads = MAX(1, value);
break;
case 'g':
zo->zo_metaslab_force_ganging =
MAX(SPA_MINBLOCKSIZE << 1, value);
break;
case 'i':
zo->zo_init = value;
break;
case 'k':
zo->zo_killrate = value;
break;
case 'p':
(void) strlcpy(zo->zo_pool, optarg,
sizeof (zo->zo_pool));
break;
case 'f':
path = realpath(optarg, NULL);
if (path == NULL) {
(void) fprintf(stderr, "error: %s: %s\n",
optarg, strerror(errno));
usage(B_FALSE);
} else {
(void) strlcpy(zo->zo_dir, path,
sizeof (zo->zo_dir));
free(path);
}
break;
case 'M':
zo->zo_mmp_test = 1;
break;
case 'V':
zo->zo_verbose++;
break;
case 'E':
zo->zo_init = 0;
break;
case 'T':
zo->zo_time = value;
break;
case 'P':
zo->zo_passtime = MAX(1, value);
break;
case 'F':
zo->zo_maxloops = MAX(1, value);
break;
case 'B':
(void) strlcpy(zo->zo_alt_ztest, optarg,
sizeof (zo->zo_alt_ztest));
break;
case 'C':
ztest_parse_name_value(optarg, zo);
break;
case 'o':
if (zo->zo_gvars_count >= ZO_GVARS_MAX_COUNT) {
(void) fprintf(stderr,
"max global var count (%zu) exceeded\n",
ZO_GVARS_MAX_COUNT);
usage(B_FALSE);
}
char *v = zo->zo_gvars[zo->zo_gvars_count];
if (strlcpy(v, optarg, ZO_GVARS_MAX_ARGLEN) >=
ZO_GVARS_MAX_ARGLEN) {
(void) fprintf(stderr,
"global var option '%s' is too long\n",
optarg);
usage(B_FALSE);
}
zo->zo_gvars_count++;
break;
case 'G':
zo->zo_dump_dbgmsg = 1;
break;
case 'h':
usage(B_TRUE);
break;
case '?':
default:
usage(B_FALSE);
break;
}
}
fini_options();
/* When the raid choice is 'random', pick dRAID 50% of the time */
if (strcmp(raid_kind, "random") == 0) {
raid_kind = (ztest_random(2) == 0) ? "draid" : "raidz";
if (ztest_opts.zo_verbose >= 3)
(void) printf("choosing RAID type '%s'\n", raid_kind);
}
if (strcmp(raid_kind, "draid") == 0) {
uint64_t min_devsize;
/* With fewer disks use 256M, otherwise 128M is OK */
min_devsize = (ztest_opts.zo_raid_children < 16) ?
(256ULL << 20) : (128ULL << 20);
/* No top-level mirrors with dRAID for now */
zo->zo_mirrors = 0;
/* Use more appropriate defaults for dRAID */
if (zo->zo_vdevs == ztest_opts_defaults.zo_vdevs)
zo->zo_vdevs = 1;
if (zo->zo_raid_children ==
ztest_opts_defaults.zo_raid_children)
zo->zo_raid_children = 16;
if (zo->zo_ashift < 12)
zo->zo_ashift = 12;
if (zo->zo_vdev_size < min_devsize)
zo->zo_vdev_size = min_devsize;
if (zo->zo_draid_data + zo->zo_raid_parity >
zo->zo_raid_children - zo->zo_draid_spares) {
(void) fprintf(stderr, "error: too few draid "
"children (%d) for stripe width (%d)\n",
zo->zo_raid_children,
zo->zo_draid_data + zo->zo_raid_parity);
usage(B_FALSE);
}
(void) strlcpy(zo->zo_raid_type, VDEV_TYPE_DRAID,
sizeof (zo->zo_raid_type));
} else /* using raidz */ {
ASSERT0(strcmp(raid_kind, "raidz"));
zo->zo_raid_parity = MIN(zo->zo_raid_parity,
zo->zo_raid_children - 1);
}
zo->zo_vdevtime =
(zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
UINT64_MAX >> 2);
if (*zo->zo_alt_ztest) {
const char *invalid_what = "ztest";
char *val = zo->zo_alt_ztest;
if (0 != access(val, X_OK) ||
(strrchr(val, '/') == NULL && (errno = EINVAL)))
goto invalid;
int dirlen = strrchr(val, '/') - val;
strncpy(zo->zo_alt_libpath, val, dirlen);
invalid_what = "library path", val = zo->zo_alt_libpath;
if (strrchr(val, '/') == NULL && (errno = EINVAL))
goto invalid;
*strrchr(val, '/') = '\0';
strlcat(val, "/lib", sizeof (zo->zo_alt_libpath));
if (0 != access(zo->zo_alt_libpath, X_OK))
goto invalid;
return;
invalid:
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate %s %s", invalid_what, val);
}
}
static void
ztest_kill(ztest_shared_t *zs)
{
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
/*
* Before we kill ourselves, make sure that the config is updated.
* See comment above spa_write_cachefile().
*/
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE);
mutex_exit(&spa_namespace_lock);
(void) raise(SIGKILL);
}
static void
ztest_record_enospc(const char *s)
{
(void) s;
ztest_shared->zs_enospc_count++;
}
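/*
 * Return the configured ashift, or a random shift in
 * [SPA_MINBLOCKSHIFT, SPA_MINBLOCKSHIFT + 4] when ashift is 0.
 */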
static uint64_t
ztest_get_ashift(void)
{
if (ztest_opts.zo_ashift == 0)
return (SPA_MINBLOCKSHIFT + ztest_random(5));
return (ztest_opts.zo_ashift);
}
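/*
 * Return B_TRUE if 'name' matches the dRAID distributed spare naming
 * convention, i.e. "draid<parity>-<vdev>-<spare>".
 */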
static boolean_t
ztest_is_draid_spare(const char *name)
{
uint64_t spare_id = 0, parity = 0, vdev_id = 0;
if (sscanf(name, VDEV_TYPE_DRAID "%"PRIu64"-%"PRIu64"-%"PRIu64"",
&parity, &vdev_id, &spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
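/*
 * Construct an nvlist describing a single file (or dRAID spare) vdev.
 * If no path is given, one is generated from the dev/aux templates.
 * Unless the vdev is a dRAID spare, the backing file is created and
 * truncated to 'size' (when size is nonzero).
 */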
static nvlist_t *
make_vdev_file(const char *path, const char *aux, const char *pool,
size_t size, uint64_t ashift)
{
char *pathbuf = NULL;
uint64_t vdev;
nvlist_t *file;
boolean_t draid_spare = B_FALSE;
if (ashift == 0)
ashift = ztest_get_ashift();
if (path == NULL) {
pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
path = pathbuf;
if (aux != NULL) {
vdev = ztest_shared->zs_vdev_aux;
(void) snprintf(pathbuf, MAXPATHLEN,
ztest_aux_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool,
aux, vdev);
} else {
vdev = ztest_shared->zs_vdev_next_leaf++;
(void) snprintf(pathbuf, MAXPATHLEN,
ztest_dev_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool, vdev);
}
} else {
draid_spare = ztest_is_draid_spare(path);
}
if (size != 0 && !draid_spare) {
int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fd == -1)
fatal(B_TRUE, "can't open %s", path);
if (ftruncate(fd, size) != 0)
fatal(B_TRUE, "can't ftruncate %s", path);
(void) close(fd);
}
file = fnvlist_alloc();
fnvlist_add_string(file, ZPOOL_CONFIG_TYPE,
draid_spare ? VDEV_TYPE_DRAID_SPARE : VDEV_TYPE_FILE);
fnvlist_add_string(file, ZPOOL_CONFIG_PATH, path);
fnvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift);
umem_free(pathbuf, MAXPATHLEN);
return (file);
}
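/*
 * Construct an nvlist for a raidz or dRAID vdev with 'r' file children.
 * Fewer than two children degenerates to a plain file vdev.  For dRAID,
 * the data/spare/group geometry is added to the config.
 */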
static nvlist_t *
make_vdev_raid(const char *path, const char *aux, const char *pool, size_t size,
uint64_t ashift, int r)
{
nvlist_t *raid, **child;
int c;
if (r < 2)
return (make_vdev_file(path, aux, pool, size, ashift));
child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < r; c++)
child[c] = make_vdev_file(path, aux, pool, size, ashift);
raid = fnvlist_alloc();
fnvlist_add_string(raid, ZPOOL_CONFIG_TYPE,
ztest_opts.zo_raid_type);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_NPARITY,
ztest_opts.zo_raid_parity);
fnvlist_add_nvlist_array(raid, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, r);
if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata = ztest_opts.zo_draid_data;
uint64_t nparity = ztest_opts.zo_raid_parity;
uint64_t nspares = ztest_opts.zo_draid_spares;
uint64_t children = ztest_opts.zo_raid_children;
uint64_t ngroups = 1;
/*
* Calculate the minimum number of groups required to fill a
* slice. This is the LCM of the stripe width (data + parity)
* and the number of data drives (children - spares).
*/
while (ngroups * (ndata + nparity) % (children - nspares) != 0)
ngroups++;
/* Store the basic dRAID configuration. */
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NDATA, ndata);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
}
for (c = 0; c < r; c++)
fnvlist_free(child[c]);
umem_free(child, r * sizeof (nvlist_t *));
return (raid);
}
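/*
 * Construct an nvlist for an m-way mirror of raidz/dRAID (or file)
 * vdevs.  With m < 1 no mirror wrapper is created and the child config
 * is returned directly.
 */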
static nvlist_t *
make_vdev_mirror(const char *path, const char *aux, const char *pool,
size_t size, uint64_t ashift, int r, int m)
{
nvlist_t *mirror, **child;
int c;
if (m < 1)
return (make_vdev_raid(path, aux, pool, size, ashift, r));
child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < m; c++)
child[c] = make_vdev_raid(path, aux, pool, size, ashift, r);
mirror = fnvlist_alloc();
fnvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, VDEV_TYPE_MIRROR);
fnvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, m);
for (c = 0; c < m; c++)
fnvlist_free(child[c]);
umem_free(child, m * sizeof (nvlist_t *));
return (mirror);
}
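/*
 * Construct the root vdev nvlist with 't' top-level vdevs.  The optional
 * 'class' sets the allocation bias (e.g. "log" or "special") on each
 * top-level vdev, and 'aux' selects the children array name used for
 * spare/l2cache configs.
 */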
static nvlist_t *
make_vdev_root(const char *path, const char *aux, const char *pool, size_t size,
uint64_t ashift, const char *class, int r, int m, int t)
{
nvlist_t *root, **child;
int c;
boolean_t log;
ASSERT3S(t, >, 0);
log = (class != NULL && strcmp(class, "log") == 0);
child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < t; c++) {
child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
r, m);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, log);
if (class != NULL && class[0] != '\0') {
ASSERT(m > 1 || log); /* expecting a mirror */
fnvlist_add_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, class);
}
}
root = fnvlist_alloc();
fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, t);
for (c = 0; c < t; c++)
fnvlist_free(child[c]);
umem_free(child, t * sizeof (nvlist_t *));
return (root);
}
/*
* Select a random spa version in the range
* [initial_version, SPA_VERSION_FEATURES].
*/
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
uint64_t version = initial_version;
if (version <= SPA_VERSION_BEFORE_FEATURES) {
version = version +
ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
}
if (version > SPA_VERSION_BEFORE_FEATURES)
version = SPA_VERSION_FEATURES;
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
return (version);
}
static int
ztest_random_blocksize(void)
{
ASSERT3U(ztest_spa->spa_max_ashift, !=, 0);
/*
* Choose a block size >= the ashift.
* If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks.
*/
int maxbs = SPA_OLD_MAXBLOCKSHIFT;
if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE)
maxbs = 20;
uint64_t block_shift =
ztest_random(maxbs - ztest_spa->spa_max_ashift + 1);
return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}
static int
ztest_random_dnodesize(void)
{
int slots;
int max_slots = spa_maxdnodesize(ztest_spa) >> DNODE_SHIFT;
if (max_slots == DNODE_MIN_SLOTS)
return (DNODE_MIN_SIZE);
/*
* Weight the random distribution more heavily toward smaller
* dnode sizes since that is more likely to reflect real-world
* usage.
*/
ASSERT3U(max_slots, >, 4);
switch (ztest_random(10)) {
case 0:
slots = 5 + ztest_random(max_slots - 4);
break;
case 1 ... 4:
slots = 2 + ztest_random(3);
break;
default:
slots = 1;
break;
}
return (slots << DNODE_SHIFT);
}
static int
ztest_random_ibshift(void)
{
return (DN_MIN_INDBLKSHIFT +
ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}
static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
uint64_t top;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *tvd;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
do {
top = ztest_random(rvd->vdev_children);
tvd = rvd->vdev_child[top];
} while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) ||
tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
return (top);
}
static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
uint64_t value;
do {
value = zfs_prop_random_value(prop, ztest_random(-1ULL));
} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
return (value);
}
static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
boolean_t inherit)
{
const char *propname = zfs_prop_to_name(prop);
const char *valname;
char *setpoint;
uint64_t curval;
int error;
error = dsl_prop_set_int(osname, propname,
(inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
if (ztest_opts.zo_verbose >= 6) {
int err;
err = zfs_prop_index_to_string(prop, curval, &valname);
if (err)
(void) printf("%s %s = %llu at '%s'\n", osname,
propname, (unsigned long long)curval, setpoint);
else
(void) printf("%s %s = %s at '%s'\n",
osname, propname, valname, setpoint);
}
umem_free(setpoint, MAXPATHLEN);
return (error);
}
static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
spa_t *spa = ztest_spa;
nvlist_t *props = NULL;
int error;
props = fnvlist_alloc();
fnvlist_add_uint64(props, zpool_prop_to_name(prop), value);
error = spa_prop_set(spa, props);
fnvlist_free(props);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
return (error);
}
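/*
 * Own an objset, transparently loading the wrapping key for encrypted
 * datasets.  If the key for this dataset cannot be loaded, walk up
 * toward the encryption root and retry.
 */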
static int
ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
int err;
char *cp = NULL;
char ddname[ZFS_MAX_DATASET_NAME_LEN];
strcpy(ddname, name);
cp = strchr(ddname, '@');
if (cp != NULL)
*cp = '\0';
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
while (decrypt && err == EACCES) {
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
crypto_args, &dcp));
err = spa_keystore_load_wkey(ddname, dcp, B_FALSE);
/*
* Note: if there was an error loading, the wkey was not
* consumed, and needs to be freed.
*/
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err == EINVAL) {
/*
* We couldn't load a key for this dataset so try
* the parent. This loop will eventually hit the
* encryption root since ztest only makes clones
* as children of their origin datasets.
*/
cp = strrchr(ddname, '/');
if (cp == NULL)
return (err);
*cp = '\0';
err = EACCES;
continue;
} else if (err != 0) {
break;
}
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
break;
}
return (err);
}
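/*
 * Simple reader/writer lock (rll_t) used for the per-dataset object and
 * range locks: writers wait for all readers and any current writer,
 * while readers only wait for a writer.
 */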
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT3P(rll->rll_writer, ==, NULL);
ASSERT0(rll->rll_readers);
mutex_destroy(&rll->rll_lock);
cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
mutex_enter(&rll->rll_lock);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT0(rll->rll_readers);
rll->rll_writer = NULL;
} else {
ASSERT3S(rll->rll_readers, >, 0);
ASSERT3P(rll->rll_writer, ==, NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
cv_broadcast(&rll->rll_cv);
mutex_exit(&rll->rll_lock);
}
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_lock(rll, type);
}
static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_unlock(rll);
}
static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
uint64_t size, rl_type_t type)
{
uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
rl_t *rl;
rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
rl->rl_object = object;
rl->rl_offset = offset;
rl->rl_size = size;
rl->rl_lock = rll;
ztest_rll_lock(rll, type);
return (rl);
}
static void
ztest_range_unlock(rl_t *rl)
{
rll_t *rll = rl->rl_lock;
ztest_rll_unlock(rll);
umem_free(rl, sizeof (*rl));
}
static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
zd->zd_os = os;
zd->zd_zilog = dmu_objset_zil(os);
zd->zd_shared = szd;
dmu_objset_name(os, zd->zd_name);
int l;
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
VERIFY0(pthread_rwlock_init(&zd->zd_zilog_lock, NULL));
mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_rll_init(&zd->zd_range_lock[l]);
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
int l;
mutex_destroy(&zd->zd_dirobj_lock);
(void) pthread_rwlock_destroy(&zd->zd_zilog_lock);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_rll_destroy(&zd->zd_range_lock[l]);
}
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
uint64_t txg;
int error;
/*
* Attempt to assign tx to some transaction group.
*/
error = dmu_tx_assign(tx, txg_how);
if (error) {
if (error == ERESTART) {
ASSERT3U(txg_how, ==, TXG_NOWAIT);
dmu_tx_wait(tx);
} else {
ASSERT3U(error, ==, ENOSPC);
ztest_record_enospc(tag);
}
dmu_tx_abort(tx);
return (0);
}
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
return (txg);
}
static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
bt->bt_magic = BT_MAGIC;
bt->bt_objset = dmu_objset_id(os);
bt->bt_object = object;
bt->bt_dnodesize = dnodesize;
bt->bt_offset = offset;
bt->bt_gen = gen;
bt->bt_txg = txg;
bt->bt_crtxg = crtxg;
}
static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
ASSERT3U(bt->bt_object, ==, object);
ASSERT3U(bt->bt_dnodesize, ==, dnodesize);
ASSERT3U(bt->bt_offset, ==, offset);
ASSERT3U(bt->bt_gen, <=, gen);
ASSERT3U(bt->bt_txg, <=, txg);
ASSERT3U(bt->bt_crtxg, ==, crtxg);
}
static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
dmu_object_info_t doi;
ztest_block_tag_t *bt;
dmu_object_info_from_db(db, &doi);
ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
return (bt);
}
/*
* Generate a token to fill up unused bonus buffer space. Try to make
* it unique to the object, generation, and offset to verify that data
* is not getting overwritten by data from other dnodes.
*/
#define ZTEST_BONUS_FILL_TOKEN(obj, ds, gen, offset) \
(((ds) << 48) | ((gen) << 32) | ((obj) << 8) | (offset))
/*
* Fill up the unused bonus buffer region before the block tag with a
* verifiable pattern. Filling the whole bonus area with non-zero data
* helps ensure that all dnode traversal code properly skips the
* interior regions of large dnodes.
*/
static void
ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
ASSERT(IS_P2ALIGNED((char *)end - (char *)db->db_data, 8));
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
*bonusp = token;
}
}
/*
* Verify that the unused area of a bonus buffer is filled with the
* expected tokens.
*/
static void
ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
VERIFY3U(*bonusp, ==, token);
}
}
/*
* ZIL logging ops
*/
#define lrz_type lr_mode
#define lrz_blocksize lr_uid
#define lrz_ibshift lr_gid
#define lrz_bonustype lr_rdev
#define lrz_dnodesize lr_crtime[1]
static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
itx->itx_oid = object;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
itx_t *itx;
itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
if (zil_replaying(zd->zd_zilog, tx))
return;
if (lr->lr_length > zil_max_log_data(zd->zd_zilog))
write_state = WR_INDIRECT;
itx = zil_itx_create(TX_WRITE,
sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
if (write_state == WR_COPIED &&
dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
write_state = WR_NEED_COPY;
}
itx->itx_private = zd;
itx->itx_wr_state = write_state;
itx->itx_sync = (ztest_random(8) == 0);
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
/*
* ZIL replay ops
*/
static int
ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_create_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
ztest_block_tag_t *bbt;
dmu_buf_t *db;
dmu_tx_t *tx;
uint64_t txg;
int error = 0;
int bonuslen;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
ASSERT3S(name[0], !=, '\0');
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
} else {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
}
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0)
return (ENOSPC);
ASSERT3U(dmu_objset_zil(os)->zl_replay, ==, !!lr->lr_foid);
bonuslen = DN_BONUS_SIZE(lr->lrz_dnodesize);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
if (lr->lr_foid == 0) {
lr->lr_foid = zap_create_dnsize(os,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = zap_create_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
} else {
if (lr->lr_foid == 0) {
lr->lr_foid = dmu_object_alloc_dnsize(os,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = dmu_object_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
}
if (error) {
ASSERT3U(error, ==, EEXIST);
ASSERT(zd->zd_zilog->zl_replay);
dmu_tx_commit(tx);
return (error);
}
ASSERT3U(lr->lr_foid, !=, 0);
if (lr->lrz_type != DMU_OT_ZAP_OTHER)
VERIFY0(dmu_object_set_blocksize(os, lr->lr_foid,
lr->lrz_blocksize, lr->lrz_ibshift, tx));
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
bbt = ztest_bt_bonus(db);
dmu_buf_will_dirty(db, tx);
ztest_bt_generate(bbt, os, lr->lr_foid, lr->lrz_dnodesize, -1ULL,
lr->lr_gen, txg, txg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, lr->lr_gen);
dmu_buf_rele(db, FTAG);
VERIFY0(zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
&lr->lr_foid, tx));
(void) ztest_log_create(zd, tx, lr);
dmu_tx_commit(tx);
return (0);
}
static int
ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_remove_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
dmu_object_info_t doi;
dmu_tx_t *tx;
uint64_t object, txg;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
ASSERT3S(name[0], !=, '\0');
VERIFY0(
zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
ASSERT3U(object, !=, 0);
ztest_object_lock(zd, object, RL_WRITER);
VERIFY0(dmu_object_info(os, object, &doi));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_object_unlock(zd, object);
return (ENOSPC);
}
if (doi.doi_type == DMU_OT_ZAP_OTHER) {
VERIFY0(zap_destroy(os, object, tx));
} else {
VERIFY0(dmu_object_free(os, object, tx));
}
VERIFY0(zap_remove(os, lr->lr_doid, name, tx));
(void) ztest_log_remove(zd, tx, lr, object);
dmu_tx_commit(tx);
ztest_object_unlock(zd, object);
return (0);
}
static int
ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_write_t *lr = arg2;
objset_t *os = zd->zd_os;
void *data = lr + 1; /* data follows lr */
uint64_t offset, length;
ztest_block_tag_t *bt = data;
ztest_block_tag_t *bbt;
uint64_t gen, txg, lrtxg, crtxg;
dmu_object_info_t doi;
dmu_tx_t *tx;
dmu_buf_t *db;
arc_buf_t *abuf = NULL;
rl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
if (bt->bt_magic == BSWAP_64(BT_MAGIC))
byteswap_uint64_array(bt, sizeof (*bt));
if (bt->bt_magic != BT_MAGIC)
bt = NULL;
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
gen = bbt->bt_gen;
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
P2PHASE(offset, length) == 0)
abuf = dmu_request_arcbuf(db, length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
dmu_buf_rele(db, FTAG);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
if (bt != NULL) {
/*
* Usually, verify the old data before writing new data --
* but not always, because we also want to verify correct
* behavior when the data was not recently read into cache.
*/
ASSERT0(offset % doi.doi_data_block_size);
if (ztest_random(4) != 0) {
int prefetch = ztest_random(2) ?
DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
ztest_block_tag_t rbt;
VERIFY(dmu_read(os, lr->lr_foid, offset,
sizeof (rbt), &rbt, prefetch) == 0);
if (rbt.bt_magic == BT_MAGIC) {
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);
}
}
/*
* Writes can appear to be newer than the bonus buffer because
* the ztest_get_data() callback does a dmu_read() of the
* open-context data, which may differ from the data as it
* was when the write was generated.
*/
if (zd->zd_zilog->zl_replay) {
ztest_bt_verify(bt, os, lr->lr_foid, 0, offset,
MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
bt->bt_crtxg);
}
/*
* Set the bt's gen/txg to the bonus buffer's gen/txg
* so that all of the usual ASSERTs will work.
*/
ztest_bt_generate(bt, os, lr->lr_foid, 0, offset, gen, txg,
crtxg);
}
if (abuf == NULL) {
dmu_write(os, lr->lr_foid, offset, length, data, tx);
} else {
memcpy(abuf->b_data, data, length);
dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx);
}
(void) ztest_log_write(zd, tx, lr);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_truncate_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
rl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
VERIFY0(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
lr->lr_length, tx));
(void) ztest_log_truncate(zd, tx, lr);
dmu_tx_commit(tx);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_setattr_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
dmu_buf_t *db;
ztest_block_tag_t *bbt;
uint64_t txg, lrtxg, crtxg, dnodesize;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, lr->lr_foid);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
dnodesize = bbt->bt_dnodesize;
if (zd->zd_zilog->zl_replay) {
ASSERT3U(lr->lr_size, !=, 0);
ASSERT3U(lr->lr_mode, !=, 0);
ASSERT3U(lrtxg, !=, 0);
} else {
/*
* Randomly change the size and increment the generation.
*/
lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
sizeof (*bbt);
lr->lr_mode = bbt->bt_gen + 1;
ASSERT0(lrtxg);
}
/*
* Verify that the current bonus buffer is not newer than our txg.
*/
ztest_bt_verify(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
MAX(txg, lrtxg), crtxg);
dmu_buf_will_dirty(db, tx);
ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
ASSERT3U(lr->lr_size, <=, db->db_size);
VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
bbt = ztest_bt_bonus(db);
ztest_bt_generate(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
txg, crtxg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, bbt->bt_gen);
dmu_buf_rele(db, FTAG);
(void) ztest_log_setattr(zd, tx, lr);
dmu_tx_commit(tx);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
NULL, /* 0 no such transaction type */
ztest_replay_create, /* TX_CREATE */
NULL, /* TX_MKDIR */
NULL, /* TX_MKXATTR */
NULL, /* TX_SYMLINK */
ztest_replay_remove, /* TX_REMOVE */
NULL, /* TX_RMDIR */
NULL, /* TX_LINK */
NULL, /* TX_RENAME */
ztest_replay_write, /* TX_WRITE */
ztest_replay_truncate, /* TX_TRUNCATE */
ztest_replay_setattr, /* TX_SETATTR */
NULL, /* TX_ACL */
NULL, /* TX_CREATE_ACL */
NULL, /* TX_CREATE_ATTR */
NULL, /* TX_CREATE_ACL_ATTR */
NULL, /* TX_MKDIR_ACL */
NULL, /* TX_MKDIR_ATTR */
NULL, /* TX_MKDIR_ACL_ATTR */
NULL, /* TX_WRITE2 */
NULL, /* TX_SETSAXATTR */
};
/*
* ZIL get_data callbacks
*/
static void
ztest_get_done(zgd_t *zgd, int error)
{
(void) error;
ztest_ds_t *zd = zgd->zgd_private;
uint64_t object = ((rl_t *)zgd->zgd_lr)->rl_object;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
ztest_range_unlock((rl_t *)zgd->zgd_lr);
ztest_object_unlock(zd, object);
umem_free(zgd, sizeof (*zgd));
}
static int
ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
(void) arg2;
ztest_ds_t *zd = arg;
objset_t *os = zd->zd_os;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
uint64_t txg = lr->lr_common.lrc_txg;
uint64_t crtxg;
dmu_object_info_t doi;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
ztest_object_lock(zd, object, RL_READER);
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error) {
ztest_object_unlock(zd, object);
return (error);
}
crtxg = ztest_bt_bonus(db)->bt_crtxg;
if (crtxg == 0 || crtxg > txg) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, object);
return (ENOENT);
}
dmu_object_info_from_db(db, &doi);
dmu_buf_rele(db, FTAG);
db = NULL;
zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zd;
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
ASSERT0(error);
} else {
size = doi.doi_data_block_size;
if (ISP2(size)) {
offset = P2ALIGN(offset, size);
} else {
ASSERT3U(offset, <, size);
offset = 0;
}
zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT3U(db->db_offset, ==, offset);
ASSERT3U(db->db_size, ==, size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
ztest_get_done, zgd);
if (error == 0)
return (0);
}
}
ztest_get_done(zgd, error);
return (error);
}
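/*
 * Allocate a zeroed log record with an optional name appended after it,
 * mirroring the on-disk layout where the name follows the lr_t.
 */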
static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
char *lr;
size_t namesize = name ? strlen(name) + 1 : 0;
lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
if (name)
memcpy(lr + lrsize, name, namesize);
return (lr);
}
static void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
size_t namesize = name ? strlen(name) + 1 : 0;
umem_free(lr, lrsize + namesize);
}
/*
* Lookup a bunch of objects. Returns the number of objects not found.
*/
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
if (error) {
ASSERT3S(error, ==, ENOENT);
ASSERT0(od->od_object);
missing++;
} else {
dmu_buf_t *db;
ztest_block_tag_t *bbt;
dmu_object_info_t doi;
ASSERT3U(od->od_object, !=, 0);
ASSERT0(missing); /* there should be no gaps */
ztest_object_lock(zd, od->od_object, RL_READER);
VERIFY0(dmu_bonus_hold(zd->zd_os, od->od_object,
FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
od->od_type = doi.doi_type;
od->od_blocksize = doi.doi_data_block_size;
od->od_gen = bbt->bt_gen;
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, od->od_object);
}
}
return (missing);
}
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
continue;
}
lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
lr->lrz_type = od->od_crtype;
lr->lrz_blocksize = od->od_crblocksize;
lr->lrz_ibshift = ztest_random_ibshift();
lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
lr->lrz_dnodesize = od->od_crdnodesize;
lr->lr_gen = od->od_crgen;
lr->lr_crtime[0] = time(NULL);
if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
ASSERT0(missing);
od->od_object = 0;
missing++;
} else {
od->od_object = lr->lr_foid;
od->od_type = od->od_crtype;
od->od_blocksize = od->od_crblocksize;
od->od_gen = od->od_crgen;
ASSERT3U(od->od_object, !=, 0);
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
for (i = count - 1; i >= 0; i--, od--) {
if (missing) {
missing++;
continue;
}
/*
* No object was found.
*/
if (od->od_object == 0)
continue;
lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
ASSERT3U(error, ==, ENOSPC);
missing++;
} else {
od->od_object = 0;
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
void *data)
{
lr_write_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
memcpy(lr + 1, data, size);
error = ztest_replay_write(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr) + size, NULL);
return (error);
}
static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
lr_truncate_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
error = ztest_replay_truncate(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
lr_setattr_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_size = 0;
lr->lr_mode = 0;
error = ztest_replay_setattr(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
rl_t *rl;
txg_wait_synced(dmu_objset_pool(os), 0);
ztest_object_lock(zd, object, RL_READER);
rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, offset, size);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg != 0) {
dmu_prealloc(os, object, offset, size, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
} else {
(void) dmu_free_long_range(os, object, offset, size);
}
ztest_range_unlock(rl);
ztest_object_unlock(zd, object);
}
static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
int err;
ztest_block_tag_t wbt;
dmu_object_info_t doi;
enum ztest_io_type io_type;
uint64_t blocksize;
void *data;
VERIFY0(dmu_object_info(zd->zd_os, object, &doi));
blocksize = doi.doi_data_block_size;
data = umem_alloc(blocksize, UMEM_NOFAIL);
/*
* Pick an i/o type at random, biased toward writing block tags.
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, doi.doi_dnodesize,
offset, 0, 0, 0);
(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
break;
case ZTEST_IO_WRITE_PATTERN:
(void) memset(data, 'a' + (object + offset) % 5, blocksize);
if (ztest_random(2) == 0) {
/*
* Induce fletcher2 collisions to ensure that
* zio_ddt_collision() detects and resolves them
* when using fletcher2-verify for deduplication.
*/
((uint64_t *)data)[0] ^= 1ULL << 63;
((uint64_t *)data)[4] ^= 1ULL << 63;
}
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_WRITE_ZEROES:
memset(data, 0, blocksize);
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_TRUNCATE:
(void) ztest_truncate(zd, object, offset, blocksize);
break;
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
default:
break;
case ZTEST_IO_REWRITE:
(void) pthread_rwlock_rdlock(&ztest_name_lock);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
(void) pthread_rwlock_unlock(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
}
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
/*
* Initialize an object description template.
*/
static void
ztest_od_init(ztest_od_t *od, uint64_t id, const char *tag, uint64_t index,
dmu_object_type_t type, uint64_t blocksize, uint64_t dnodesize,
uint64_t gen)
{
od->od_dir = ZTEST_DIROBJ;
od->od_object = 0;
od->od_crtype = type;
od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
od->od_crdnodesize = dnodesize ? dnodesize : ztest_random_dnodesize();
od->od_crgen = gen;
od->od_type = DMU_OT_NONE;
od->od_blocksize = 0;
od->od_gen = 0;
(void) snprintf(od->od_name, sizeof (od->od_name),
"%s(%"PRId64")[%"PRIu64"]",
tag, id, index);
}
/*
* Lookup or create the objects for a test using the od template.
* If the objects do not all exist, or if 'remove' is specified,
* remove any existing objects and create new ones. Otherwise,
* use the existing objects.
*/
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
(void) id;
zilog_t *zilog = zd->zd_zilog;
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
* shared memory. If we die, the next iteration of ztest_run()
* will verify that the log really does contain this record.
*/
mutex_enter(&zilog->zl_lock);
ASSERT3P(zd->zd_shared, !=, NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
* attempt to expose any implicit assumptions about ZIL management.
*/
void
ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
/*
* We hold the ztest_vdev_lock so we don't cause problems with
* other threads that wish to remove a log device, such as
* ztest_device_removal().
*/
mutex_enter(&ztest_vdev_lock);
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
mutex_enter(&zd->zd_dirobj_lock);
(void) pthread_rwlock_wrlock(&zd->zd_zilog_lock);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY3P(zil_open(os, ztest_get_data, NULL), ==, zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
mutex_exit(&zd->zd_dirobj_lock);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
*/
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa;
nvlist_t *nvroot;
if (zo->zo_mmp_test)
return;
/*
* Attempt to create using a bad file.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* Attempt to create using a bad mirror.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 2, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
(void) pthread_rwlock_rdlock(&ztest_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
VERIFY3U(EEXIST, ==,
spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* We open a reference to the spa and then we try to export it
* expecting one of the following errors:
*
* EBUSY
* Because of the reference we just opened.
*
* ZFS_ERR_EXPORT_IN_PROGRESS
* For the case that there is another ztest thread doing
* an export concurrently.
*/
VERIFY0(spa_open(zo->zo_pool, &spa, FTAG));
int error = spa_destroy(zo->zo_pool);
if (error != EBUSY && error != ZFS_ERR_EXPORT_IN_PROGRESS) {
fatal(B_FALSE, "spa_destroy(%s) returned unexpected value %d",
spa->spa_name, error);
}
spa_close(spa, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Start and then stop the MMP threads to ensure the startup and shutdown code
* works properly. Actual protection and property-related code tested via ZTS.
*/
void
ztest_mmp_enable_disable(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa = ztest_spa;
if (zo->zo_mmp_test)
return;
/*
* Since enabling MMP involves setting a property, it cannot be done
* while the pool is suspended.
*/
if (spa_suspended(spa))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
zfs_multihost_fail_intervals = 0;
if (!spa_multihost(spa)) {
spa->spa_multihost = B_TRUE;
mmp_thread_start(spa);
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
mmp_signal_all_threads();
txg_wait_synced(spa_get_dsl(spa), 0);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
if (spa_multihost(spa)) {
mmp_thread_stop(spa);
spa->spa_multihost = B_FALSE;
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
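/*
 * Create a scratch pool at a random pre-features SPA version and verify
 * that spa_upgrade() can move it to a random newer version.
 */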
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa;
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
if (ztest_opts.zo_mmp_test)
return;
/* dRAID added after feature flags, skip upgrade test. */
if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0)
return;
mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
(void) spa_destroy(name);
nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, ztest_opts.zo_mirrors, 1);
/*
* If we're configuring a RAIDZ device then make sure that the
* initial version is capable of supporting that feature.
*/
switch (ztest_opts.zo_raid_parity) {
case 0:
case 1:
initial_version = SPA_VERSION_INITIAL;
break;
case 2:
initial_version = SPA_VERSION_RAIDZ2;
break;
case 3:
initial_version = SPA_VERSION_RAIDZ3;
break;
}
/*
* Create a pool with a spa version that can be upgraded. Pick
* a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
*/
do {
version = ztest_random_spa_version(initial_version);
} while (version > SPA_VERSION_BEFORE_FEATURES);
props = fnvlist_alloc();
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
VERIFY0(spa_create(name, nvroot, props, NULL, NULL));
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY0(spa_open(name, &spa, FTAG));
VERIFY3U(spa_version(spa), ==, version);
newversion = ztest_random_spa_version(version + 1);
if (ztest_opts.zo_verbose >= 4) {
(void) printf("upgrading spa version from "
"%"PRIu64" to %"PRIu64"\n",
version, newversion);
}
spa_upgrade(spa, newversion);
VERIFY3U(spa_version(spa), >, version);
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
kmem_strfree(name);
mutex_exit(&ztest_vdev_lock);
}
static void
ztest_spa_checkpoint(spa_t *spa)
{
ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));
int error = spa_checkpoint(spa->spa_name);
switch (error) {
case 0:
case ZFS_ERR_DEVRM_IN_PROGRESS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
case ZFS_ERR_CHECKPOINT_EXISTS:
break;
case ENOSPC:
ztest_record_enospc(FTAG);
break;
default:
fatal(B_FALSE, "spa_checkpoint(%s) = %d", spa->spa_name, error);
}
}
static void
ztest_spa_discard_checkpoint(spa_t *spa)
{
ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));
int error = spa_checkpoint_discard(spa->spa_name);
switch (error) {
case 0:
case ZFS_ERR_DISCARDING_CHECKPOINT:
case ZFS_ERR_NO_CHECKPOINT:
break;
default:
fatal(B_FALSE, "spa_discard_checkpoint(%s) = %d",
spa->spa_name, error);
}
}
void
ztest_spa_checkpoint_create_discard(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
mutex_enter(&ztest_checkpoint_lock);
if (ztest_random(2) == 0) {
ztest_spa_checkpoint(spa);
} else {
ztest_spa_discard_checkpoint(spa);
}
mutex_exit(&ztest_checkpoint_lock);
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
vdev_t *mvd;
int c;
if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
return (vd);
for (c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
NULL)
return (mvd);
return (NULL);
}
static int
spa_num_top_vdevs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT3U(spa_config_held(spa, SCL_VDEV, RW_READER), ==, SCL_VDEV);
return (rvd->vdev_children);
}
/*
* Verify that vdev_add() works as expected.
*/
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
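/*
 * Leaves per top-level vdev: mirror legs (including any that were
 * split off) times the raidz width; used below to derive the next
 * leaf device index.
 */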
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
/*
* If we have slogs then remove them 1/4 of the time.
*/
if (spa_has_slogs(spa) && ztest_random(4) == 0) {
metaslab_group_t *mg;
/*
* find the first real slog in the log allocation class
*/
mg = spa_log_class(spa)->mc_allocator[0].mca_rotor;
while (!mg->mg_vd->vdev_islog)
mg = mg->mg_next;
guid = mg->mg_vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between removing a slog (dmu_objset_find)
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_vdev_remove(spa, guid, B_FALSE);
pthread_rwlock_unlock(&ztest_name_lock);
switch (error) {
case 0:
case EEXIST: /* Generic zil_reset() error */
case EBUSY: /* Replay required */
case EACCES: /* Crypto key not loaded */
case ZFS_ERR_CHECKPOINT_EXISTS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
fatal(B_FALSE, "spa_vdev_remove() = %d", error);
}
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* Make 1/4 of the devices log devices
*/
nvroot = make_vdev_root(NULL, NULL, NULL,
ztest_opts.zo_vdev_size, 0, (ztest_random(4) == 0) ?
"log" : NULL, ztest_opts.zo_raid_children, zs->zs_mirrors,
1);
error = spa_vdev_add(spa, nvroot);
fnvlist_free(nvroot);
switch (error) {
case 0:
break;
case ENOSPC:
ztest_record_enospc("spa_vdev_add");
break;
default:
fatal(B_FALSE, "spa_vdev_add() = %d", error);
}
}
mutex_exit(&ztest_vdev_lock);
}
void
ztest_vdev_class_add(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
nvlist_t *nvroot;
const char *class = (ztest_random(2) == 0) ?
VDEV_ALLOC_BIAS_SPECIAL : VDEV_ALLOC_BIAS_DEDUP;
int error;
/*
* By default add a special vdev 50% of the time
*/
if ((ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_OFF) ||
(ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_RND &&
ztest_random(2) == 0)) {
return;
}
mutex_enter(&ztest_vdev_lock);
/* Only test with mirrors */
if (zs->zs_mirrors < 2) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* requires feature@allocation_classes */
if (!spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)) {
mutex_exit(&ztest_vdev_lock);
return;
}
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
spa_config_exit(spa, SCL_VDEV, FTAG);
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
class, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
error = spa_vdev_add(spa, nvroot);
fnvlist_free(nvroot);
if (error == ENOSPC)
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
fatal(B_FALSE, "spa_vdev_add() = %d", error);
/*
* 50% of the time allow small blocks in the special class
*/
if (error == 0 &&
spa_special_class(spa)->mc_groups == 1 && ztest_random(2) == 0) {
if (ztest_opts.zo_verbose >= 3)
(void) printf("Enabling special VDEV small blocks\n");
(void) ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_SPECIAL_SMALL_BLOCKS, 32768, B_FALSE);
}
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 3) {
metaslab_class_t *mc;
if (strcmp(class, VDEV_ALLOC_BIAS_SPECIAL) == 0)
mc = spa_special_class(spa);
else
mc = spa_dedup_class(spa);
(void) printf("Added a %s mirrored vdev (of %d)\n",
class, (int)mc->mc_groups);
}
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
spa_aux_vdev_t *sav;
const char *aux;
char *path;
uint64_t guid = 0;
int error, ignore_err = 0;
if (ztest_opts.zo_mmp_test)
return;
path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
if (ztest_random(2) == 0) {
sav = &spa->spa_spares;
aux = ZPOOL_CONFIG_SPARES;
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
* Pick a random device to remove.
*/
vdev_t *svd = sav->sav_vdevs[ztest_random(sav->sav_count)];
/* dRAID spares cannot be removed; try anyway to see ENOTSUP */
if (strstr(svd->vdev_path, VDEV_TYPE_DRAID) != NULL)
ignore_err = ENOTSUP;
guid = svd->vdev_guid;
} else {
/*
* Find an unused device we can add.
*/
zs->zs_vdev_aux = 0;
for (;;) {
int c;
(void) snprintf(path, MAXPATHLEN, ztest_aux_template,
ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
zs->zs_vdev_aux);
for (c = 0; c < sav->sav_count; c++)
if (strcmp(sav->sav_vdevs[c]->vdev_path,
path) == 0)
break;
if (c == sav->sav_count &&
vdev_lookup_by_path(rvd, path) == NULL)
break;
zs->zs_vdev_aux++;
}
}
spa_config_exit(spa, SCL_VDEV, FTAG);
if (guid == 0) {
/*
* Add a new device.
*/
nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
(ztest_opts.zo_vdev_size * 5) / 4, 0, NULL, 0, 0, 1);
error = spa_vdev_add(spa, nvroot);
switch (error) {
case 0:
break;
default:
fatal(B_FALSE, "spa_vdev_add(%p) = %d", nvroot, error);
}
fnvlist_free(nvroot);
} else {
/*
* Remove an existing device. Sometimes, dirty its
* vdev state first to make sure we handle removal
* of devices that have pending state changes.
*/
if (ztest_random(2) == 0)
(void) vdev_online(spa, guid, 0, NULL);
error = spa_vdev_remove(spa, guid, B_FALSE);
switch (error) {
case 0:
case EBUSY:
case ZFS_ERR_CHECKPOINT_EXISTS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
if (error != ignore_err)
fatal(B_FALSE,
"spa_vdev_remove(%"PRIu64") = %d",
guid, error);
}
}
mutex_exit(&ztest_vdev_lock);
umem_free(path, MAXPATHLEN);
}
/*
* split a pool if it has mirror tlvdevs
*/
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raid_children > 1) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* generate a config from the existing config */
mutex_enter(&spa->spa_props_lock);
tree = fnvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE);
mutex_exit(&spa->spa_props_lock);
VERIFY0(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&child, &children));
schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
for (c = 0; c < children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
nvlist_t **mchild;
uint_t mchildren;
if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
schild[schildren] = fnvlist_alloc();
fnvlist_add_string(schild[schildren],
ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE);
fnvlist_add_uint64(schild[schildren],
ZPOOL_CONFIG_IS_HOLE, 1);
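/*
 * Remember where the trailing run of log/hole children starts so
 * it can be trimmed from the split config below.
 */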
if (lastlogid == 0)
lastlogid = schildren;
++schildren;
continue;
}
lastlogid = 0;
VERIFY0(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren));
schild[schildren++] = fnvlist_dup(mchild[0]);
}
/* OK, create a config that can be used to split */
split = fnvlist_alloc();
fnvlist_add_string(split, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)schild, lastlogid != 0 ? lastlogid : schildren);
config = fnvlist_alloc();
fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split);
for (c = 0; c < schildren; c++)
fnvlist_free(schild[c]);
free(schild);
fnvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
(void) pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
(void) pthread_rwlock_unlock(&ztest_name_lock);
fnvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
mutex_enter(&spa_namespace_lock);
show_pool_stats(spa);
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can attach and detach devices.
*/
void
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *pvd;
nvlist_t *root;
uint64_t leaves;
uint64_t leaf, top;
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
uint64_t oldsize, newsize;
char *oldpath, *newpath;
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int newvd_is_dspare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
if (ztest_opts.zo_mmp_test)
return;
oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* If a vdev is in the process of being removed, its removal may
* finish while we are in progress, leading to an unexpected error
* value. Don't bother trying to attach while we are in the middle
* of removal.
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_ALL, FTAG);
goto out;
}
/*
* Decide whether to do an attach or a replace.
*/
replacing = ztest_random(2);
/*
* Pick a random top-level vdev.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
/*
* Pick a random leaf within it.
*/
leaf = ztest_random(leaves);
/*
* Locate this vdev.
*/
oldvd = rvd->vdev_child[top];
/* pick a child from the mirror */
if (zs->zs_mirrors >= 1) {
ASSERT3P(oldvd->vdev_ops, ==, &vdev_mirror_ops);
ASSERT3U(oldvd->vdev_children, >=, zs->zs_mirrors);
oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raid_children];
}
/* pick a child out of the raidz group */
if (ztest_opts.zo_raid_children > 1) {
if (strcmp(oldvd->vdev_ops->vdev_op_type, "raidz") == 0)
ASSERT3P(oldvd->vdev_ops, ==, &vdev_raidz_ops);
else
ASSERT3P(oldvd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(oldvd->vdev_children, ==, ztest_opts.zo_raid_children);
oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raid_children];
}
/*
* If we're already doing an attach or replace, oldvd may be a
* mirror vdev -- in which case, pick a random child.
*/
while (oldvd->vdev_children != 0) {
oldvd_has_siblings = B_TRUE;
ASSERT3U(oldvd->vdev_children, >=, 2);
oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
}
oldguid = oldvd->vdev_guid;
oldsize = vdev_get_min_asize(oldvd);
oldvd_is_log = oldvd->vdev_top->vdev_islog;
(void) strcpy(oldpath, oldvd->vdev_path);
pvd = oldvd->vdev_parent;
pguid = pvd->vdev_guid;
/*
* If oldvd has siblings, then half of the time, detach it. Prior
* to the detach the pool is scrubbed in order to prevent creating
* unrepairable blocks as a result of the data corruption injection.
*/
if (oldvd_has_siblings && ztest_random(2) == 0) {
spa_config_exit(spa, SCL_ALL, FTAG);
error = ztest_scrub_impl(spa);
if (error)
goto out;
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS &&
error != ZFS_ERR_DISCARDING_CHECKPOINT)
fatal(B_FALSE, "detach (%s) returned %d",
oldpath, error);
goto out;
}
/*
* For the new vdev, choose with equal probability between the two
* standard paths (ending in either 'a' or 'b') or a random hot spare.
*/
if (sav->sav_count != 0 && ztest_random(3) == 0) {
newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
newvd_is_spare = B_TRUE;
if (newvd->vdev_ops == &vdev_draid_spare_ops)
newvd_is_dspare = B_TRUE;
(void) strcpy(newpath, newvd->vdev_path);
} else {
(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
if (ztest_random(2) == 0)
newpath[strlen(newpath) - 1] = 'b';
newvd = vdev_lookup_by_path(rvd, newpath);
}
if (newvd) {
/*
* Reopen to ensure the vdev's asize field isn't stale.
*/
vdev_reopen(newvd);
newsize = vdev_get_min_asize(newvd);
} else {
/*
* Make newsize a little bigger or smaller than oldsize.
* If it's smaller, the attach should fail.
* If it's larger, and we're doing a replace,
* we should get dynamic LUN growth when we're done.
*/
newsize = 10 * oldsize / (9 + ztest_random(3));
}
/*
* If pvd is not a mirror or root, the attach should fail with ENOTSUP,
* unless it's a replace; in that case any non-replacing parent is OK.
*
* If newvd is already part of the pool, it should fail with EBUSY.
*
* If newvd is too small, it should fail with EOVERFLOW.
*
* If newvd is a distributed spare and it's being attached to a
* dRAID which is not its parent, it should fail with ENOTSUP.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops && (!replacing ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_spare_ops))
expected_error = ENOTSUP;
else if (newvd_is_spare && (!replacing || oldvd_is_log))
expected_error = ENOTSUP;
else if (newvd == oldvd)
expected_error = replacing ? 0 : EBUSY;
else if (vdev_lookup_by_path(rvd, newpath) != NULL)
expected_error = EBUSY;
else if (!newvd_is_dspare && newsize < oldsize)
expected_error = EOVERFLOW;
else if (ashift > oldvd->vdev_top->vdev_ashift)
expected_error = EDOM;
else if (newvd_is_dspare && pvd != vdev_draid_spare_get_parent(newvd))
expected_error = ENOTSUP;
else
expected_error = 0;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Build the nvlist describing newpath.
*/
root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
ashift, NULL, 0, 0, 1);
/*
* When supported select either a healing or sequential resilver.
*/
boolean_t rebuilding = B_FALSE;
if (pvd->vdev_ops == &vdev_mirror_ops ||
pvd->vdev_ops == &vdev_root_ops) {
rebuilding = !!ztest_random(2);
}
error = spa_vdev_attach(spa, oldguid, root, replacing, rebuilding);
fnvlist_free(root);
/*
* If our parent was the replacing vdev, but the replace completed,
* then instead of failing with ENOTSUP we may either succeed,
* fail with ENODEV, or fail with EOVERFLOW.
*/
if (expected_error == ENOTSUP &&
(error == 0 || error == ENODEV || error == EOVERFLOW))
expected_error = error;
/*
* If someone grew the LUN, the replacement may be too small.
*/
if (error == EOVERFLOW || error == EBUSY)
expected_error = error;
if (error == ZFS_ERR_CHECKPOINT_EXISTS ||
error == ZFS_ERR_DISCARDING_CHECKPOINT ||
error == ZFS_ERR_RESILVER_IN_PROGRESS ||
error == ZFS_ERR_REBUILD_IN_PROGRESS)
expected_error = error;
if (error != expected_error && expected_error != EBUSY) {
fatal(B_FALSE, "attach (%s %"PRIu64", %s %"PRIu64", %d) "
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
out:
mutex_exit(&ztest_vdev_lock);
umem_free(oldpath, MAXPATHLEN);
umem_free(newpath, MAXPATHLEN);
}
void
ztest_device_removal(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
vdev_t *vd;
uint64_t guid;
int error;
mutex_enter(&ztest_vdev_lock);
if (ztest_device_removal_active) {
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* Remove a random top-level vdev and wait for removal to finish.
*/
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE));
guid = vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error == 0) {
ztest_device_removal_active = B_TRUE;
mutex_exit(&ztest_vdev_lock);
/*
* spa->spa_vdev_removal is created in a sync task that
* is initiated via dsl_sync_task_nowait(). Since the
* task may not run before spa_vdev_remove() returns, we
* must wait at least 1 txg to ensure that the removal
* struct has been created.
*/
txg_wait_synced(spa_get_dsl(spa), 0);
while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
} else {
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The pool needs to be scrubbed after completing device removal.
* Failure to do so may result in checksum errors due to the
* strategy employed by ztest_fault_inject() when selecting which
* offsets are redundant and can be damaged.
*/
error = spa_scan(spa, POOL_SCAN_SCRUB);
if (error == 0) {
while (dsl_scan_scrubbing(spa_get_dsl(spa)))
txg_wait_synced(spa_get_dsl(spa), 0);
}
mutex_enter(&ztest_vdev_lock);
ztest_device_removal_active = B_FALSE;
mutex_exit(&ztest_vdev_lock);
}
/*
* Callback function which expands the physical size of the vdev.
*/
static vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
spa_t *spa __maybe_unused = vd->vdev_spa;
size_t *newsize = arg;
size_t fsize;
int fd;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
return (vd);
fsize = lseek(fd, 0, SEEK_END);
VERIFY0(ftruncate(fd, *newsize));
if (ztest_opts.zo_verbose >= 6) {
(void) printf("%s grew from %lu to %lu bytes\n",
vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
}
(void) close(fd);
return (NULL);
}
/*
* Callback function which expands a given vdev by calling vdev_online().
*/
static vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
(void) arg;
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint64_t guid = vd->vdev_guid;
uint64_t generation = spa->spa_config_generation + 1;
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
int error;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/* Calling vdev_online will initialize the new metaslabs */
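/* Drop the config lock across vdev_online(), which takes it itself. */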
spa_config_exit(spa, SCL_STATE, spa);
error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If vdev_online returned an error or the underlying vdev_open
* failed then we abort the expand. The only way to know that
* vdev_open fails is by checking the returned newstate.
*/
if (error || newstate != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Unable to expand vdev, state %u, "
"error %d\n", newstate, error);
}
return (vd);
}
ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
/*
* Since we dropped the lock we need to ensure that we're
* still talking to the original vdev. It's possible this
* vdev may have been detached/replaced while we were
* trying to online it.
*/
if (generation != spa->spa_config_generation) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("vdev configuration has changed, "
"guid %"PRIu64", state %"PRIu64", "
"expected gen %"PRIu64", got gen %"PRIu64"\n",
guid,
tvd->vdev_state,
generation,
spa->spa_config_generation);
}
return (vd);
}
return (NULL);
}
/*
* Traverse the vdev tree calling the supplied function.
* We continue to walk the tree until we either have walked all
* children or we receive a non-NULL return from the callback.
* If a NULL callback is passed, then we just return the first
* leaf vdev we encounter.
*/
static vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
uint_t c;
if (vd->vdev_ops->vdev_op_leaf) {
if (func == NULL)
return (vd);
else
return (func(vd, arg));
}
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
return (cvd);
}
return (NULL);
}
/*
* Verify that dynamic LUN growth works as expected.
*/
void
ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
vdev_t *vd, *tvd;
metaslab_class_t *mc;
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
mutex_enter(&ztest_checkpoint_lock);
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If there is a vdev removal in progress, it could complete while
* we are running, in which case we would not be able to verify
* that the metaslab_class space increased (because it decreases
* when the device removal completes).
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
mg = tvd->vdev_mg;
mc = mg->mg_class;
old_ms_count = tvd->vdev_ms_count;
old_class_space = metaslab_class_get_space(mc);
/*
* Determine the size of the first leaf vdev associated with
* our top-level device.
*/
vd = vdev_walk_tree(tvd, NULL, NULL);
ASSERT3P(vd, !=, NULL);
ASSERT(vd->vdev_ops->vdev_op_leaf);
psize = vd->vdev_psize;
/*
* We only try to expand the vdev if it's healthy, less than 4x its
* original size, and has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
ASSERT3U(psize, >, 0);
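/* Grow by 1/8 of the current size, but by at least one max-sized block. */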
newsize = psize + MAX(psize / 8, SPA_MAXBLOCKSIZE);
ASSERT3U(newsize, >, psize);
if (ztest_opts.zo_verbose >= 6) {
(void) printf("Expanding LUN %s from %lu to %lu\n",
vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
}
/*
* Growing the vdev is a two-step process:
* 1). expand the physical size (i.e. relabel)
* 2). online the vdev to create the new metaslabs
*/
if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
tvd->vdev_state != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
/*
* Expanding the LUN will update the config asynchronously,
* thus we must wait for the async thread to complete any
* pending tasks before proceeding.
*/
for (;;) {
boolean_t done;
mutex_enter(&spa->spa_async_lock);
done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
mutex_exit(&spa->spa_async_lock);
if (done)
break;
txg_wait_synced(spa_get_dsl(spa), 0);
(void) poll(NULL, 0, 100);
}
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
tvd = spa->spa_root_vdev->vdev_child[top];
new_ms_count = tvd->vdev_ms_count;
new_class_space = metaslab_class_get_space(mc);
if (tvd->vdev_mg != mg || mg->mg_class != mc) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
/*
* Make sure we were able to grow the vdev.
*/
if (new_ms_count <= old_ms_count) {
fatal(B_FALSE,
"LUN expansion failed: ms_count %"PRIu64" < %"PRIu64"\n",
old_ms_count, new_ms_count);
}
/*
* Make sure we were able to grow the pool.
*/
if (new_class_space <= old_class_space) {
fatal(B_FALSE,
"LUN expansion failed: class_space %"PRIu64" < %"PRIu64"\n",
old_class_space, new_class_space);
}
if (ztest_opts.zo_verbose >= 5) {
char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ];
nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf));
nicenum(new_class_space, newnumbuf, sizeof (newnumbuf));
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
static void
ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
(void) arg, (void) cr;
/*
* Create the objects common to all ztest datasets.
*/
VERIFY0(zap_create_claim(os, ZTEST_DIROBJ,
DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx));
}
static int
ztest_dataset_create(char *dsname)
{
int err;
uint64_t rand;
dsl_crypto_params_t *dcp = NULL;
/*
* 50% of the time, we create encrypted datasets
* using a random cipher suite and a hard-coded
* wrapping key.
*/
rand = ztest_random(2);
if (rand != 0) {
nvlist_t *crypto_args = fnvlist_alloc();
nvlist_t *props = fnvlist_alloc();
/* slight bias towards the default cipher suite */
rand = ztest_random(ZIO_CRYPT_FUNCTIONS);
if (rand < ZIO_CRYPT_AES_128_CCM)
rand = ZIO_CRYPT_ON;
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), rand);
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
/*
* These parameters aren't really used by the kernel. They
* are simply stored so that userspace knows how to load
* the wrapping key.
*/
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), ZFS_KEYFORMAT_RAW);
fnvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), "prompt");
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 0ULL);
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 0ULL);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, props,
crypto_args, &dcp));
/*
* Cycle through all available encryption implementations
* to verify interoperability.
*/
VERIFY0(gcm_impl_set("cycle"));
VERIFY0(aes_impl_set("cycle"));
fnvlist_free(crypto_args);
fnvlist_free(props);
}
err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, dcp,
ztest_objset_create_cb, NULL);
dsl_crypto_params_free(dcp, !!err);
rand = ztest_random(100);
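/* 20% of the time, also set sync=always on the new dataset. */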
if (err || rand < 80)
return (err);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Setting dataset %s to sync always\n", dsname);
return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
ZFS_SYNC_ALWAYS, B_FALSE));
}
static int
ztest_objset_destroy_cb(const char *name, void *arg)
{
(void) arg;
objset_t *os;
dmu_object_info_t doi;
int error;
/*
* Verify that the dataset contains a directory object.
*/
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
if (error != ENOENT) {
/* We could have crashed in the middle of destroying it */
ASSERT0(error);
ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
}
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Destroy the dataset.
*/
if (strchr(name, '@') != NULL) {
error = dsl_destroy_snapshot(name, B_TRUE);
if (error != ECHRNG) {
/*
* The program was executed, but encountered a runtime
* error, such as insufficient slop, or a hold on the
* dataset.
*/
ASSERT0(error);
}
} else {
error = dsl_destroy_head(name);
if (error == ENOSPC) {
/* There could be a checkpoint or insufficient slop */
ztest_record_enospc(FTAG);
} else if (error != EBUSY) {
/* There could be a hold on this dataset */
ASSERT0(error);
}
}
return (0);
}
static boolean_t
ztest_snapshot_create(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
(void) snprintf(snapname, sizeof (snapname), "%"PRIu64"", id);
error = dmu_objset_snapshot_one(osname, snapname);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (B_FALSE);
}
if (error != 0 && error != EEXIST) {
fatal(B_FALSE, "ztest_snapshot_create(%s@%s) = %d", osname,
snapname, error);
}
return (B_TRUE);
}
static boolean_t
ztest_snapshot_destroy(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
(void) snprintf(snapname, sizeof (snapname), "%s@%"PRIu64"",
osname, id);
error = dsl_destroy_snapshot(snapname, B_FALSE);
if (error != 0 && error != ENOENT)
fatal(B_FALSE, "ztest_snapshot_destroy(%s) = %d",
snapname, error);
return (B_TRUE);
}
void
ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) zd;
ztest_ds_t *zdtmp;
int iters;
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
int i;
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) snprintf(name, sizeof (name), "%s/temp_%"PRIu64"",
ztest_opts.zo_pool, id);
/*
* If this dataset exists from a previous run, process its replay log
* half of the time. If we don't replay it, then dsl_destroy_head()
* (invoked from ztest_objset_destroy_cb()) should just throw it away.
*/
if (ztest_random(2) == 0 &&
ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, FTAG, &os) == 0) {
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
dmu_objset_disown(os, B_TRUE, FTAG);
}
/*
* There may be an old instance of the dataset we're about to
* create lying around from a previous run. If so, destroy it
* and all of its snapshots.
*/
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
/*
* Verify that the destroyed dataset is no longer in the namespace.
*/
VERIFY3U(ENOENT, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
/*
* Verify that we can create a new dataset.
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_objset_create(%s) = %d", name, error);
}
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, B_TRUE,
FTAG, &os));
ztest_zd_init(zdtmp, NULL, os);
/*
* Open the intent log for it.
*/
zilog = zil_open(os, ztest_get_data, NULL);
/*
* Put some objects in there, do a little I/O to them,
* and randomly take a couple of snapshots along the way.
*/
iters = ztest_random(5);
for (i = 0; i < iters; i++) {
ztest_dmu_object_alloc_free(zdtmp, id);
if (ztest_random(iters) == 0)
(void) ztest_snapshot_create(name, i);
}
/*
* Verify that we cannot create an existing dataset.
*/
VERIFY3U(EEXIST, ==,
dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL, NULL));
/*
* Verify that we can hold an objset that is also owned.
*/
VERIFY0(dmu_objset_hold(name, FTAG, &os2));
dmu_objset_rele(os2, FTAG);
/*
* Verify that we cannot own an objset that is already owned.
*/
VERIFY3U(EBUSY, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER,
B_FALSE, B_TRUE, FTAG, &os2));
zil_close(zilog);
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_zd_fini(zdtmp);
out:
(void) pthread_rwlock_unlock(&ztest_name_lock);
umem_free(zdtmp, sizeof (ztest_ds_t));
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Cleanup non-standard snapshots and clones.
*/
static void
ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
{
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"",
osname, id);
(void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"",
osname, id);
(void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"",
clone1name, id);
(void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"",
osname, id);
(void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"",
clone1name, id);
error = dsl_destroy_head(clone2name);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone2name, error);
error = dsl_destroy_snapshot(snap3name, B_FALSE);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
snap3name, error);
error = dsl_destroy_snapshot(snap2name, B_FALSE);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
snap2name, error);
error = dsl_destroy_head(clone1name);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone1name, error);
error = dsl_destroy_snapshot(snap1name, B_FALSE);
if (error && error != ENOENT)
fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
snap1name, error);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
/*
* Verify dsl_dataset_promote handles EBUSY
*/
void
ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os;
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
char *osname = zd->zd_name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
ztest_dsl_dataset_cleanup(osname, id);
(void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"",
osname, id);
(void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"",
osname, id);
(void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"",
clone1name, id);
(void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"",
osname, id);
(void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"",
clone1name, id);
error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_take_snapshot(%s) = %d", snap1name, error);
}
error = dmu_objset_clone(clone1name, snap1name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone1name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap2name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap3name, error);
}
error = dmu_objset_clone(clone2name, snap3name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone2name, error);
}
error = ztest_dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, B_TRUE,
FTAG, &os);
if (error)
fatal(B_FALSE, "dmu_objset_own(%s) = %d", snap2name, error);
error = dsl_dataset_promote(clone2name, NULL);
if (error == ENOSPC) {
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_record_enospc(FTAG);
goto out;
}
if (error != EBUSY)
fatal(B_FALSE, "dsl_dataset_promote(%s), %d, not EBUSY",
clone2name, error);
dmu_objset_disown(os, B_TRUE, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
(void) pthread_rwlock_unlock(&ztest_name_lock);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 4
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
void
ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
int batchsize;
int size;
int b;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
batchsize = OD_ARRAY_SIZE;
for (b = 0; b < batchsize; b++)
ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER,
0, 0, 0);
/*
* Destroy the previous batch of objects, create a new batch,
* and do some I/O on the new objects.
*/
if (ztest_object_init(zd, od, size, B_TRUE) != 0)
return;
while (ztest_random(4 * batchsize) != 0)
ztest_io(zd, od[ztest_random(batchsize)].od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
umem_free(od, size);
}
/*
* Rewind the global allocator to verify object allocation backfilling.
*/
void
ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
uint64_t object;
/*
* Rewind the global allocator randomly back to a lower object number
* to force backfilling and reclamation of recently freed dnodes.
*/
mutex_enter(&os->os_obj_lock);
object = ztest_random(os->os_obj_next_chunk);
os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk);
mutex_exit(&os->os_obj_lock);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
/*
* Verify that dmu_{read,write} work as expected.
*/
void
ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
{
int size;
ztest_od_t *od;
objset_t *os = zd->zd_os;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
dmu_tx_t *tx;
int freeit, error;
uint64_t i, n, s, txg;
bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 40;
int free_percent = 5;
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is arbitrary. It doesn't have to be a power of two,
* and it doesn't have any relation to the object blocksize.
* The only requirement is that it can hold at least two bufwads.
*
* Normally, we write the bufwad to each of these locations.
* However, free_percent of the time we instead write zeroes to
* packobj and perform a dmu_free_range() on bigobj. By comparing
* bigobj to packobj, we can verify that the DMU is correctly
* tracking which parts of an object are allocated and free,
* and that the contents of the allocated blocks are correct.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, chunksize);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
chunksize = od[0].od_gen;
ASSERT3U(chunksize, ==, od[1].od_gen);
/*
* Prefetch a random chunk of the big object.
* Our aim here is to get some async reads in flight
* for blocks that we may free below; the DMU should
* handle this race correctly.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(2 * width - 1);
dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize,
ZIO_PRIORITY_SYNC_READ);
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_alloc(packsize, UMEM_NOFAIL);
bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
/*
* free_percent of the time, free a range of bigobj rather than
* overwriting it.
*/
freeit = (ztest_random(100) < free_percent);
/*
* Read the current contents of our objects.
*/
error = dmu_read(os, packobj, packoff, packsize, packbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
if (freeit)
dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
else
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
/* This accounts for setting the checksum/compression. */
dmu_tx_hold_bonus(tx, bigobj);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
return;
}
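/*
 * Pick random checksum and compression algorithms, restricted to the
 * legacy ranges accepted by the per-object setters below.
 */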
enum zio_checksum cksum;
do {
cksum = (enum zio_checksum)
ztest_random_dsl_prop(ZFS_PROP_CHECKSUM);
} while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS);
dmu_object_set_checksum(os, bigobj, cksum, tx);
enum zio_compress comp;
do {
comp = (enum zio_compress)
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION);
} while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS);
dmu_object_set_compress(os, bigobj, comp, tx);
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize);
ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize);
if (pack->bw_txg > txg)
fatal(B_FALSE,
"future leak: got %"PRIx64", open txg is %"PRIx64"",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
fatal(B_FALSE, "wrong index: "
"got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
pack, bigH);
if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
pack, bigT);
if (freeit) {
memset(pack, 0, sizeof (bufwad_t));
} else {
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
}
*bigH = *pack;
*bigT = *pack;
}
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (freeit) {
if (ztest_opts.zo_verbose >= 7) {
(void) printf("freeing offset %"PRIx64" size %"PRIx64""
" txg %"PRIx64"\n",
bigoff, bigsize, txg);
}
VERIFY0(dmu_free_range(os, bigobj, bigoff, bigsize, tx));
} else {
if (ztest_opts.zo_verbose >= 7) {
(void) printf("writing offset %"PRIx64" size %"PRIx64""
" txg %"PRIx64"\n",
bigoff, bigsize, txg);
}
dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(memcmp(packbuf, packcheck, packsize));
ASSERT0(memcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
}
static void
compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
{
uint64_t i;
bufwad_t *pack;
bufwad_t *bigH;
bufwad_t *bigT;
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize);
ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize);
if (pack->bw_txg > txg)
fatal(B_FALSE,
"future leak: got %"PRIx64", open txg is %"PRIx64"",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
fatal(B_FALSE, "wrong index: "
"got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
pack, bigH);
if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
pack, bigT);
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
*bigH = *pack;
*bigT = *pack;
}
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
void
ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
uint64_t i;
int error;
int size;
uint64_t n, s, txg;
bufwad_t *packbuf, *bigbuf;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t blocksize = ztest_random_blocksize();
uint64_t chunksize = blocksize;
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 9;
dmu_buf_t *bonus_db;
arc_buf_t **bigbuf_arcbufs;
dmu_object_info_t doi;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is set equal to bigobj block size so that
* dmu_assign_arcbuf_by_dbuf() can be tested for object updates.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
blocksize = od[0].od_blocksize;
chunksize = blocksize;
ASSERT3U(chunksize, ==, od[1].od_gen);
VERIFY0(dmu_object_info(os, bigobj, &doi));
VERIFY(ISP2(doi.doi_data_block_size));
VERIFY3U(chunksize, ==, doi.doi_data_block_size);
VERIFY3U(chunksize, >=, 2 * sizeof (bufwad_t));
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
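/*
 * Hold bigobj's bonus buffer so ARC buffers can be loaned against
 * it; 2 * s slots cover the split-buffer case used in iteration 5.
 */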
VERIFY0(dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
/*
* Iteration 0 tests zcopy for DB_UNCACHED dbufs.
* Iteration 1 tests zcopy to already referenced dbufs.
* Iteration 2 tests zcopy to a dirty dbuf in the same txg.
* Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
* Iteration 4 tests zcopy when the dbuf is no longer dirty.
* Iteration 5 tests zcopy when it can't be done.
* Iteration 6 does one more zcopy write.
*/
for (i = 0; i < 7; i++) {
uint64_t j;
uint64_t off;
/*
* In iteration 5 (i == 5) use arcbufs
* that don't match bigobj blksz to test
* dmu_assign_arcbuf_by_dbuf() when it can't directly
* assign an arcbuf to a dbuf.
*/
for (j = 0; j < s; j++) {
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bigbuf_arcbufs[j] =
dmu_request_arcbuf(bonus_db, chunksize);
} else {
bigbuf_arcbufs[2 * j] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
bigbuf_arcbufs[2 * j + 1] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
}
}
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
for (j = 0; j < s; j++) {
if (i != 5 ||
chunksize < (SPA_MINBLOCKSIZE * 2)) {
dmu_return_arcbuf(bigbuf_arcbufs[j]);
} else {
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j]);
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j + 1]);
}
}
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
dmu_buf_rele(bonus_db, FTAG);
return;
}
/*
* 50% of the time don't read objects in the 1st iteration to
* test dmu_assign_arcbuf_by_dbuf() for the case when there are
* no existing dbufs for the specified offsets.
*/
if (i != 0 || ztest_random(2) != 0) {
error = dmu_read(os, packobj, packoff,
packsize, packbuf, DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize,
bigbuf, DMU_READ_PREFETCH);
ASSERT0(error);
}
compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
n, chunksize, txg);
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (ztest_opts.zo_verbose >= 7) {
(void) printf("writing offset %"PRIx64" size %"PRIx64""
" txg %"PRIx64"\n",
bigoff, bigsize, txg);
}
for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
dmu_buf_t *dbt;
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
memcpy(bigbuf_arcbufs[j]->b_data,
(caddr_t)bigbuf + (off - bigoff),
chunksize);
} else {
memcpy(bigbuf_arcbufs[2 * j]->b_data,
(caddr_t)bigbuf + (off - bigoff),
chunksize / 2);
memcpy(bigbuf_arcbufs[2 * j + 1]->b_data,
(caddr_t)bigbuf + (off - bigoff) +
chunksize / 2,
chunksize / 2);
}
if (i == 1) {
VERIFY(dmu_buf_hold(os, bigobj, off,
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[j], tx));
} else {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[2 * j], tx));
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2,
bigbuf_arcbufs[2 * j + 1], tx));
}
if (i == 1) {
dmu_buf_rele(dbt, FTAG);
}
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(memcmp(packbuf, packcheck, packsize));
ASSERT0(memcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
if (i == 2) {
txg_wait_open(dmu_objset_pool(os), 0, B_TRUE);
} else if (i == 3) {
txg_wait_synced(dmu_objset_pool(os), 0);
}
}
dmu_buf_rele(bonus_db, FTAG);
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
}
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
(void) id;
ztest_od_t *od;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
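/*
 * Start at a random power-of-two offset of at least 2^43 bytes,
 * plus a random max-blocksize-aligned displacement.
 */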
uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
/*
* Have multiple threads write to large offsets in an object
* to verify that parallel writes to an object -- even to the
* same blocks within the object -- don't cause any trouble.
*/
ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
return;
while (ztest_random(10) != 0)
ztest_io(zd, od->od_object, offset);
umem_free(od, sizeof (ztest_od_t));
}
void
ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
uint64_t count = ztest_random(20) + 1;
uint64_t blocksize = ztest_random_blocksize();
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
ztest_prealloc(zd, od->od_object, offset, count * blocksize);
data = umem_zalloc(blocksize, UMEM_NOFAIL);
while (ztest_random(count) != 0) {
uint64_t randoff = offset + (ztest_random(count) * blocksize);
if (ztest_write(zd, od->od_object, randoff, blocksize,
data) != 0)
break;
while (ztest_random(4) != 0)
ztest_io(zd, od->od_object, randoff);
}
umem_free(data, blocksize);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Verify that zap_{create,destroy,add,remove,update} work as expected.
*/
#define ZTEST_ZAP_MIN_INTS 1
#define ZTEST_ZAP_MAX_INTS 4
#define ZTEST_ZAP_MAX_PROPS 1000
void
ztest_zap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object;
uint64_t txg, last_txg;
uint64_t value[ZTEST_ZAP_MAX_INTS];
uint64_t zl_ints, zl_intsize, prop;
int i, ints;
dmu_tx_t *tx;
char propname[100], txgname[100];
int error;
const char *const hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Generate a known hash collision, and verify that
* we can lookup and remove both entries.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
for (i = 0; i < 2; i++) {
value[i] = i;
VERIFY0(zap_add(os, object, hc[i], sizeof (uint64_t),
1, &value[i], tx));
}
for (i = 0; i < 2; i++) {
VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
sizeof (uint64_t), 1, &value[i], tx));
VERIFY0(
zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
}
for (i = 0; i < 2; i++) {
VERIFY0(zap_remove(os, object, hc[i], tx));
}
dmu_tx_commit(tx);
/*
* Generate a bunch of random entries.
*/
ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%"PRIu64"", prop);
(void) sprintf(txgname, "txg_%"PRIu64"", prop);
memset(value, 0, sizeof (value));
last_txg = 0;
/*
* If these zap entries already exist, validate their contents.
*/
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == 0) {
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
VERIFY0(zap_lookup(os, object, txgname, zl_intsize,
zl_ints, &last_txg));
VERIFY0(zap_length(os, object, propname, &zl_intsize,
&zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, ints);
VERIFY0(zap_lookup(os, object, propname, zl_intsize,
zl_ints, value));
for (i = 0; i < ints; i++) {
ASSERT3U(value[i], ==, last_txg + object + i);
}
} else {
ASSERT3U(error, ==, ENOENT);
}
/*
* Atomically update two entries in our zap object.
* The first is named txg_%llu, and contains the txg
* in which the property was last updated. The second
* is named prop_%llu, and the nth element of its value
* should be txg + object + n.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
if (last_txg > txg)
fatal(B_FALSE, "zap future leak: old %"PRIu64" new %"PRIu64"",
last_txg, txg);
for (i = 0; i < ints; i++)
value[i] = txg + object + i;
VERIFY0(zap_update(os, object, txgname, sizeof (uint64_t),
1, &txg, tx));
VERIFY0(zap_update(os, object, propname, sizeof (uint64_t),
ints, value, tx));
dmu_tx_commit(tx);
/*
* Remove a random pair of entries.
*/
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%"PRIu64"", prop);
(void) sprintf(txgname, "txg_%"PRIu64"", prop);
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == ENOENT)
goto out;
ASSERT0(error);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
VERIFY0(zap_remove(os, object, txgname, tx));
VERIFY0(zap_remove(os, object, propname, tx));
dmu_tx_commit(tx);
out:
umem_free(od, sizeof (ztest_od_t));
}
/*
* Test case to test the upgrading of a microzap to fatzap.
*/
void
ztest_fzap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object, txg, value;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Add entries to this ZAP and make sure it spills over
* and gets upgraded to a fatzap. Also, since we are adding
* 2050 entries we should see ptrtbl growth and leaf-block split.
*/
for (value = 0; value < 2050; value++) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dmu_tx_t *tx;
int error;
(void) snprintf(name, sizeof (name), "fzap-%"PRIu64"-%"PRIu64"",
id, value);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1,
&value, tx);
ASSERT(error == 0 || error == EEXIST);
dmu_tx_commit(tx);
}
out:
umem_free(od, sizeof (ztest_od_t));
}
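/*
 * Exercise ZAP length/lookup/add/update/remove operations on a shared
 * object so that concurrent threads race on the same ZAP.
 */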
void
ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
dmu_tx_t *tx;
int i, namelen, error;
int micro = ztest_random(2);
char name[20], string_value[20];
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
object = od->od_object;
/*
* Generate a random name of the form 'xxx.....' where each
* x is a random printable character and the dots are dots.
* There are 94 such characters, and the name length goes from
* 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
*/
namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
for (i = 0; i < 3; i++)
name[i] = '!' + ztest_random('~' - '!' + 1);
for (; i < namelen - 1; i++)
name[i] = '.';
name[i] = '\0';
if ((namelen & 1) || micro) {
wsize = sizeof (txg);
wc = 1;
data = &txg;
} else {
wsize = 1;
wc = namelen;
data = string_value;
}
count = -1ULL;
VERIFY0(zap_count(os, object, &count));
ASSERT3S(count, !=, -1ULL);
/*
* Select an operation: length, lookup, add, update, remove.
*/
i = ztest_random(5);
if (i >= 2) {
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
memcpy(string_value, name, namelen);
} else {
tx = NULL;
txg = 0;
memset(string_value, 0, namelen);
}
switch (i) {
case 0:
error = zap_length(os, object, name, &zl_wsize, &zl_wc);
if (error == 0) {
ASSERT3U(wsize, ==, zl_wsize);
ASSERT3U(wc, ==, zl_wc);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 1:
error = zap_lookup(os, object, name, wsize, wc, data);
if (error == 0) {
if (data == string_value &&
memcmp(name, data, namelen) != 0)
fatal(B_FALSE, "name '%s' != val '%s' len %d",
name, (char *)data, namelen);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 2:
error = zap_add(os, object, name, wsize, wc, data, tx);
ASSERT(error == 0 || error == EEXIST);
break;
case 3:
VERIFY0(zap_update(os, object, name, wsize, wc, data, tx));
break;
case 4:
error = zap_remove(os, object, name, tx);
ASSERT(error == 0 || error == ENOENT);
break;
}
if (tx != NULL)
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Commit callback data.
*/
typedef struct ztest_cb_data {
list_node_t zcd_node;
uint64_t zcd_txg;
int zcd_expected_err;
boolean_t zcd_added;
boolean_t zcd_called;
spa_t *zcd_spa;
} ztest_cb_data_t;
/* This is the actual commit callback function */
static void
ztest_commit_callback(void *arg, int error)
{
ztest_cb_data_t *data = arg;
uint64_t synced_txg;
VERIFY3P(data, !=, NULL);
VERIFY3S(data->zcd_expected_err, ==, error);
VERIFY(!data->zcd_called);
synced_txg = spa_last_synced_txg(data->zcd_spa);
if (data->zcd_txg > synced_txg)
fatal(B_FALSE,
"commit callback of txg %"PRIu64" called prematurely, "
"last synced txg = %"PRIu64"\n",
data->zcd_txg, synced_txg);
data->zcd_called = B_TRUE;
if (error == ECANCELED) {
ASSERT0(data->zcd_txg);
ASSERT(!data->zcd_added);
/*
* The private callback data should be destroyed here, but
* since we are going to check the zcd_called field after
* dmu_tx_abort(), we will destroy it there.
*/
return;
}
ASSERT(data->zcd_added);
ASSERT3U(data->zcd_txg, !=, 0);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/* See if this cb was called with a shorter txg delay than any so far */
if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
zc_min_txg_delay = synced_txg - data->zcd_txg;
/* Remove our callback from the list */
list_remove(&zcl.zcl_callbacks, data);
(void) mutex_exit(&zcl.zcl_callbacks_lock);
umem_free(data, sizeof (ztest_cb_data_t));
}
/* Allocate and initialize callback data structure */
static ztest_cb_data_t *
ztest_create_cb_data(objset_t *os, uint64_t txg)
{
ztest_cb_data_t *cb_data;
cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
cb_data->zcd_txg = txg;
cb_data->zcd_spa = dmu_objset_spa(os);
list_link_init(&cb_data->zcd_node);
return (cb_data);
}
/*
* Commit callback test.
*/
void
ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
ztest_cb_data_t *cb_data[3], *tmp_cb;
uint64_t old_txg, txg;
int i, error = 0;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
tx = dmu_tx_create(os);
cb_data[0] = ztest_create_cb_data(os, 0);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
/* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
error = -1;
if (!error)
error = dmu_tx_assign(tx, TXG_NOWAIT);
txg = error ? 0 : dmu_tx_get_txg(tx);
cb_data[0]->zcd_txg = txg;
cb_data[1] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
if (error) {
/*
* It's not a strict requirement to call the registered
* callbacks from inside dmu_tx_abort(), but that's what
* is supposed to happen in the current implementation,
* so we will check for that.
*/
for (i = 0; i < 2; i++) {
cb_data[i]->zcd_expected_err = ECANCELED;
VERIFY(!cb_data[i]->zcd_called);
}
dmu_tx_abort(tx);
for (i = 0; i < 2; i++) {
VERIFY(cb_data[i]->zcd_called);
umem_free(cb_data[i], sizeof (ztest_cb_data_t));
}
umem_free(od, sizeof (ztest_od_t));
return;
}
cb_data[2] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
/*
* Read existing data to make sure there isn't a future leak.
*/
VERIFY0(dmu_read(os, od->od_object, 0, sizeof (uint64_t),
&old_txg, DMU_READ_PREFETCH));
if (old_txg > txg)
fatal(B_FALSE,
"future leak: got %"PRIu64", open txg is %"PRIu64"",
old_txg, txg);
dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
* synced, it is difficult to reliably determine whether a commit
* callback hasn't been called due to high load or due to a flawed
* implementation.
*
* In practice, we will assume that if after a certain number of txgs a
* commit callback hasn't been called, then most likely there's an
* implementation bug.
*/
tmp_cb = list_head(&zcl.zcl_callbacks);
if (tmp_cb != NULL &&
tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
fatal(B_FALSE,
"Commit callback threshold exceeded, "
"oldest txg: %"PRIu64", open txg: %"PRIu64"\n",
tmp_cb->zcd_txg, txg);
}
/*
* Let's find the place to insert our callbacks.
*
* Even though the list is ordered by txg, it is possible for the
* insertion point to not be the end because our txg may already be
* quiescing at this point and other callbacks in the open txg
* (from other objsets) may have sneaked in.
*/
tmp_cb = list_tail(&zcl.zcl_callbacks);
while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
/* Add the 3 callbacks to the list */
for (i = 0; i < 3; i++) {
if (tmp_cb == NULL)
list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
else
list_insert_after(&zcl.zcl_callbacks, tmp_cb,
cb_data[i]);
cb_data[i]->zcd_added = B_TRUE;
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
zc_cb_counter += 3;
(void) mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Visit each object in the dataset. Verify that its properties
* are consistent with what was stored in the block tag when it was created,
* and that its unused bonus buffer space has not been overwritten.
*/
void
ztest_verify_dnode_bt(ztest_ds_t *zd, uint64_t id)
{
(void) id;
objset_t *os = zd->zd_os;
uint64_t obj;
int err = 0;
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
ztest_block_tag_t *bt = NULL;
dmu_object_info_t doi;
dmu_buf_t *db;
ztest_object_lock(zd, obj, RL_READER);
if (dmu_bonus_hold(os, obj, FTAG, &db) != 0) {
ztest_object_unlock(zd, obj);
continue;
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_size >= sizeof (*bt))
bt = ztest_bt_bonus(db);
if (bt && bt->bt_magic == BT_MAGIC) {
ztest_bt_verify(bt, os, obj, doi.doi_dnodesize,
bt->bt_offset, bt->bt_gen, bt->bt_txg,
bt->bt_crtxg);
ztest_verify_unused_bonus(db, bt, obj, os, bt->bt_gen);
}
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, obj);
}
}
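/*
 * Set a random selection of dataset properties (checksum, compression,
 * copies, dedup, recordsize) to random but valid values.
 */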
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
(void) id;
zfs_prop_t proplist[] = {
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
(void) pthread_rwlock_rdlock(&ztest_name_lock);
for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
VERIFY0(ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_RECORDSIZE,
ztest_random_blocksize(), (int)ztest_random(2)));
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
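/*
 * Set a random pool property, then fetch and optionally dump all
 * pool properties.
 */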
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
nvlist_t *props = NULL;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_AUTOTRIM, ztest_random(2));
VERIFY0(spa_prop_get(ztest_spa, &props));
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
fnvlist_free(props);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
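/*
 * Release a single user hold (holdname) on the given snapshot.
 */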
static int
user_release_one(const char *snapname, const char *holdname)
{
nvlist_t *snaps, *holds;
int error;
snaps = fnvlist_alloc();
holds = fnvlist_alloc();
fnvlist_add_boolean(holds, holdname);
fnvlist_add_nvlist(snaps, snapname, holds);
fnvlist_free(holds);
error = dsl_dataset_user_release(snaps, NULL);
fnvlist_free(snaps);
return (error);
}
/*
* Test snapshot hold/release and deferred destroy.
*/
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
int error;
objset_t *os = zd->zd_os;
objset_t *origin;
char snapname[100];
char fullname[100];
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
dmu_objset_name(os, osname);
(void) snprintf(snapname, sizeof (snapname), "sh1_%"PRIu64"", id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
(void) snprintf(clonename, sizeof (clonename), "%s/ch1_%"PRIu64"",
osname, id);
(void) snprintf(tag, sizeof (tag), "tag_%"PRIu64"", id);
/*
* Clean up from any previous run.
*/
error = dsl_destroy_head(clonename);
if (error != ENOENT)
ASSERT0(error);
error = user_release_one(fullname, tag);
if (error != ESRCH && error != ENOENT)
ASSERT0(error);
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != ENOENT)
ASSERT0(error);
/*
* Create snapshot, clone it, mark snap for deferred destroy,
* destroy clone, verify snap was also destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
error = dmu_objset_clone(clonename, fullname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_clone");
goto out;
}
fatal(B_FALSE, "dmu_objset_clone(%s) = %d", clonename, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = dsl_destroy_head(clonename);
if (error)
fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clonename, error);
error = dmu_objset_hold(fullname, FTAG, &origin);
if (error != ENOENT)
fatal(B_FALSE, "dmu_objset_hold(%s) = %d", fullname, error);
/*
* Create snapshot, add temporary hold, verify that we can't
* destroy a held snapshot, mark for deferred destroy,
* release hold, verify snapshot was destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
holds = fnvlist_alloc();
fnvlist_add_string(holds, fullname, tag);
error = dsl_dataset_user_hold(holds, 0, NULL);
fnvlist_free(holds);
if (error == ENOSPC) {
ztest_record_enospc("dsl_dataset_user_hold");
goto out;
} else if (error) {
fatal(B_FALSE, "dsl_dataset_user_hold(%s, %s) = %u",
fullname, tag, error);
}
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != EBUSY) {
fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
fullname, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = user_release_one(fullname, tag);
if (error)
fatal(B_FALSE, "user_release_one(%s, %s) = %d",
fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
int fd;
uint64_t offset;
uint64_t leaves;
uint64_t bad = 0x1990c0ffeedecadeull;
uint64_t top, leaf;
char *path0;
char *pathrand;
size_t fsize;
int bshift = SPA_MAXBLOCKSHIFT + 2;
int iters = 1000;
int maxfaults;
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
/*
* Device removal is in progress, fault injection must be disabled
* until it completes and the pool is scrubbed. The fault injection
* strategy for damaging blocks does not take into account evacuated
* blocks which may have already been damaged.
*/
if (ztest_device_removal_active) {
mutex_exit(&ztest_vdev_lock);
goto out;
}
maxfaults = MAXFAULTS(zs);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
mirror_save = zs->zs_mirrors;
mutex_exit(&ztest_vdev_lock);
ASSERT3U(leaves, >=, 1);
/*
* While ztest is running the number of leaves will not change. This
* is critical for the fault injection logic as it determines where
* errors can be safely injected such that they are always repairable.
*
* When restarting ztest a different number of leaves may be requested
* which will shift the regions to be damaged. This is fine as long
* as the pool has been scrubbed prior to using the new mapping.
* Failure to do so can result in non-repairable damage being injected.
*/
if (ztest_pool_scrubbed == B_FALSE)
goto out;
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (i.e. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
(void) pthread_rwlock_rdlock(&ztest_name_lock);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (ztest_random(2) == 0) {
/*
* Inject errors on a normal data device or slog device.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
leaf = ztest_random(leaves) + zs->zs_splits;
/*
* Generate paths to the first leaf in this top-level vdev,
* and to the random leaf we selected. We'll induce transient
* write failures and random online/offline activity on leaf 0,
* and we'll write random garbage to the randomly chosen leaf.
*/
(void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + zs->zs_splits);
(void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
if (vd0 != NULL && vd0->vdev_top->vdev_islog)
islog = B_TRUE;
/*
* If the top-level vdev needs to be resilvered
* then we only allow faults on the device that is
* resilvering.
*/
if (vd0 != NULL && maxfaults != 1 &&
(!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
vd0->vdev_resilver_txg != 0)) {
/*
* Make vd0 explicitly claim to be unreadable,
* or unwritable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
* and we can do it if maxfaults >= 2 because we'll
* have enough redundancy. If maxfaults == 1, the
* combination of this with injection of random data
* corruption below exceeds the pool's fault tolerance.
*/
vdev_file_t *vf = vd0->vdev_tsd;
zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d",
(long long)vd0->vdev_id, (int)maxfaults);
if (vf != NULL && ztest_random(3) == 0) {
(void) close(vf->vf_file->f_fd);
vf->vf_file->f_fd = -1;
} else if (ztest_random(2) == 0) {
vd0->vdev_cant_read = B_TRUE;
} else {
vd0->vdev_cant_write = B_TRUE;
}
guid0 = vd0->vdev_guid;
}
} else {
/*
* Inject errors on an l2cache device.
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
goto out;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strcpy(path0, vd0->vdev_path);
(void) strcpy(pathrand, vd0->vdev_path);
leaf = 0;
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
if ((maxfaults >= 2 || islog) && guid0 != 0) {
if (ztest_random(10) < 6) {
int flags = (ztest_random(2) == 0 ?
ZFS_OFFLINE_TEMPORARY : 0);
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between offlining a slog and
* destroying a dataset. Offlining the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
(void) pthread_rwlock_wrlock(&ztest_name_lock);
VERIFY3U(vdev_offline(spa, guid0, flags), !=, EBUSY);
if (islog)
(void) pthread_rwlock_unlock(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
mutex_enter(&ztest_vdev_lock);
(void) vdev_online(spa, guid0, 0, NULL);
mutex_exit(&ztest_vdev_lock);
}
}
if (maxfaults == 0)
goto out;
/*
* We have at least single-fault tolerance, so inject data corruption.
*/
fd = open(pathrand, O_RDWR);
if (fd == -1) /* we hit a gap in the device namespace */
goto out;
fsize = lseek(fd, 0, SEEK_END);
while (--iters != 0) {
/*
* The offset must be chosen carefully to ensure that
* we do not inject a given logical block with errors
* on two different leaf devices, because ZFS can not
* tolerate that (if maxfaults==1).
*
* To achieve this we divide each leaf device into
* chunks of size (# leaves * SPA_MAXBLOCKSIZE * 4).
* Each chunk is further divided into error-injection
* ranges (can accept errors) and clear ranges (we do
* not inject errors in those). Each error-injection
* range can accept errors only for a single leaf vdev.
* Error-injection ranges are separated by clear ranges.
*
* For example, with 3 leaves, each chunk looks like:
* 0 to 32M: injection range for leaf 0
* 32M to 64M: clear range - no injection allowed
* 64M to 96M: injection range for leaf 1
* 96M to 128M: clear range - no injection allowed
* 128M to 160M: injection range for leaf 2
* 160M to 192M: clear range - no injection allowed
*
* Each clear range must be large enough such that a
* single block cannot straddle it. This way a block
* can't be a target in two different injection ranges
* (on different leaf vdevs).
*/
offset = ztest_random(fsize / (leaves << bshift)) *
(leaves << bshift) + (leaf << bshift) +
(ztest_random(1ULL << (bshift - 1)) & -8ULL);
/*
* Only allow damage to the labels at one end of the vdev.
*
* If all labels are damaged, the device will be totally
* inaccessible, which will result in loss of data,
* because we also damage (parts of) the other side of
* the mirror/raidz.
*
* Additionally, we will always have both an even and an
* odd label, so that we can handle crashes in the
* middle of vdev_config_sync().
*/
if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE)
continue;
/*
* The two end labels are stored at the "end" of the disk, but
* the end of the disk (vdev_psize) is aligned to
* sizeof (vdev_label_t).
*/
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
mutex_enter(&ztest_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
mutex_exit(&ztest_vdev_lock);
(void) close(fd);
goto out;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(B_TRUE,
"can't inject bad word at 0x%"PRIx64" in %s",
offset, pathrand);
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
" offset 0x%"PRIx64"\n", pathrand, offset);
}
(void) close(fd);
out:
umem_free(path0, MAXPATHLEN);
umem_free(pathrand, MAXPATHLEN);
}
/*
* By design ztest will never inject uncorrectable damage into the pool.
* Issue a scrub, wait for it to complete, and verify there is never any
* persistent damage.
*
* Only after a full scrub has been completed is it safe to start injecting
* data corruption. See the comment in ztest_fault_inject().
*/
static int
ztest_scrub_impl(spa_t *spa)
{
int error = spa_scan(spa, POOL_SCAN_SCRUB);
if (error)
return (error);
while (dsl_scan_scrubbing(spa_get_dsl(spa)))
txg_wait_synced(spa_get_dsl(spa), 0);
if (spa_get_errlog_size(spa) > 0)
return (ECKSUM);
ztest_pool_scrubbed = B_TRUE;
return (0);
}
/*
* Scrub the pool.
*/
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
int error;
/*
* A scrub is already in progress due to device removal.
*/
if (ztest_device_removal_active)
return;
/*
* Start a scrub, wait a moment, then force a restart.
*/
(void) spa_scan(spa, POOL_SCAN_SCRUB);
(void) poll(NULL, 0, 100);
error = ztest_scrub_impl(spa);
if (error == EBUSY)
error = 0;
ASSERT0(error);
}
/*
* Change the guid for the pool.
*/
void
ztest_reguid(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
uint64_t orig, load;
int error;
if (ztest_opts.zo_mmp_test)
return;
orig = spa_guid(spa);
load = spa_load_guid(spa);
(void) pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_change_guid(spa);
(void) pthread_rwlock_unlock(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Changed guid old %"PRIu64" -> %"PRIu64"\n",
orig, spa_guid(spa));
}
VERIFY3U(orig, !=, spa_guid(spa));
VERIFY3U(load, ==, spa_load_guid(spa));
}
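/*
 * Verify that the BLAKE3 implementations produce matching checksums for
 * plain buffers and ABDs, in both native and byteswapped form.
 */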
void
ztest_blake3(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
hrtime_t end = gethrtime() + NANOSEC;
zio_cksum_salt_t salt;
void *salt_ptr = &salt.zcs_bytes;
struct abd *abd_data, *abd_meta;
void *buf, *templ;
int i, *ptr;
uint32_t size;
BLAKE3_CTX ctx;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
abd_data = abd_alloc(size, B_FALSE);
abd_meta = abd_alloc(size, B_TRUE);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
memset(salt_ptr, 'A', 32);
abd_copy_from_buf_off(abd_data, buf, 0, size);
abd_copy_from_buf_off(abd_meta, buf, 0, size);
while (gethrtime() <= end) {
int run_count = 100;
zio_cksum_t zc_ref1, zc_ref2;
zio_cksum_t zc_res1, zc_res2;
void *ref1 = &zc_ref1;
void *ref2 = &zc_ref2;
void *res1 = &zc_res1;
void *res2 = &zc_res2;
/* BLAKE3_KEY_LEN = 32 */
- VERIFY0(blake3_set_impl_name("generic"));
+ VERIFY0(blake3_impl_setname("generic"));
templ = abd_checksum_blake3_tmpl_init(&salt);
Blake3_InitKeyed(&ctx, salt_ptr);
Blake3_Update(&ctx, buf, size);
Blake3_Final(&ctx, ref1);
zc_ref2 = zc_ref1;
ZIO_CHECKSUM_BSWAP(&zc_ref2);
abd_checksum_blake3_tmpl_free(templ);
- VERIFY0(blake3_set_impl_name("cycle"));
+ VERIFY0(blake3_impl_setname("cycle"));
while (run_count-- > 0) {
/* Test current implementation */
Blake3_InitKeyed(&ctx, salt_ptr);
Blake3_Update(&ctx, buf, size);
Blake3_Final(&ctx, res1);
zc_res2 = zc_res1;
ZIO_CHECKSUM_BSWAP(&zc_res2);
VERIFY0(memcmp(ref1, res1, 32));
VERIFY0(memcmp(ref2, res2, 32));
/* Test ABD - data */
templ = abd_checksum_blake3_tmpl_init(&salt);
abd_checksum_blake3_native(abd_data, size,
templ, &zc_res1);
abd_checksum_blake3_byteswap(abd_data, size,
templ, &zc_res2);
VERIFY0(memcmp(ref1, res1, 32));
VERIFY0(memcmp(ref2, res2, 32));
/* Test ABD - metadata */
abd_checksum_blake3_native(abd_meta, size,
templ, &zc_res1);
abd_checksum_blake3_byteswap(abd_meta, size,
templ, &zc_res2);
abd_checksum_blake3_tmpl_free(templ);
VERIFY0(memcmp(ref1, res1, 32));
VERIFY0(memcmp(ref2, res2, 32));
}
}
abd_free(abd_data);
abd_free(abd_meta);
umem_free(buf, size);
}
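/*
 * Verify that every fletcher_4 implementation matches the scalar
 * reference, for plain buffers and ABDs, native and byteswapped.
 */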
void
ztest_fletcher(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
void *buf;
struct abd *abd_data, *abd_meta;
uint32_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_byteswap;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
abd_data = abd_alloc(size, B_FALSE);
abd_meta = abd_alloc(size, B_TRUE);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
abd_copy_from_buf_off(abd_data, buf, 0, size);
abd_copy_from_buf_off(abd_meta, buf, 0, size);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_byteswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_byteswap;
fletcher_4_byteswap(buf, size, NULL, &zc_byteswap);
fletcher_4_native(buf, size, NULL, &zc);
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - data */
abd_fletcher_4_byteswap(abd_data, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_data, size, NULL, &zc);
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - metadata */
abd_fletcher_4_byteswap(abd_meta, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_meta, size, NULL, &zc);
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
}
umem_free(buf, size);
abd_free(abd_data);
abd_free(abd_meta);
}
}
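/*
 * Verify that incremental fletcher_4 checksums computed over random-sized
 * chunks match the one-shot scalar reference.
 */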
void
ztest_fletcher_incr(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
void *buf;
size_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_bswap;
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_bswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_bswap;
size_t pos = 0;
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
while (pos < size) {
size_t inc = 64 * ztest_random(size / 67);
/* sometimes add a few bytes to test the non-SIMD path */
if (ztest_random(100) < 10)
inc += P2ALIGN(ztest_random(64),
sizeof (uint32_t));
if (inc > (size - pos))
inc = size - pos;
fletcher_4_incremental_native(buf + pos, inc,
&zc);
fletcher_4_incremental_byteswap(buf + pos, inc,
&zc_bswap);
pos += inc;
}
VERIFY3U(pos, ==, size);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
/*
* verify if incremental on the whole buffer is
* equivalent to non-incremental version
*/
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
fletcher_4_incremental_native(buf, size, &zc);
fletcher_4_incremental_byteswap(buf, size, &zc_bswap);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
}
umem_free(buf, size);
}
}
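/*
 * Apply any global variable overrides (zo_gvars) requested on the
 * command line before the tests start.
 */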
static int
ztest_set_global_vars(void)
{
for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
char *kv = ztest_opts.zo_gvars[i];
VERIFY3U(strlen(kv), <=, ZO_GVARS_MAX_ARGLEN);
VERIFY3U(strlen(kv), >, 0);
int err = set_global_var(kv);
if (ztest_opts.zo_verbose > 0) {
(void) printf("setting global var %s ... %s\n", kv,
err ? "failed" : "ok");
}
if (err != 0) {
(void) fprintf(stderr,
"failed to set global var '%s'\n", kv);
return (err);
}
}
return (0);
}
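/*
 * Convert the global variable overrides into a "-o var=value" argument
 * list suitable for passing to zdb.
 */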
static char **
ztest_global_vars_to_zdb_args(void)
{
char **args = calloc(2*ztest_opts.zo_gvars_count + 1, sizeof (char *));
char **cur = args;
for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
*cur++ = (char *)"-o";
*cur++ = ztest_opts.zo_gvars[i];
}
ASSERT3P(cur, ==, &args[2*ztest_opts.zo_gvars_count]);
*cur = NULL;
return (args);
}
/* The end of strings is indicated by a NULL element */
static char *
join_strings(char **strings, const char *sep)
{
size_t totallen = 0;
for (char **sp = strings; *sp != NULL; sp++) {
totallen += strlen(*sp);
totallen += strlen(sep);
}
if (totallen > 0) {
ASSERT(totallen >= strlen(sep));
totallen -= strlen(sep);
}
size_t buflen = totallen + 1;
char *o = malloc(buflen); /* trailing 0 byte */
o[0] = '\0';
for (char **sp = strings; *sp != NULL; sp++) {
size_t would;
would = strlcat(o, *sp, buflen);
VERIFY3U(would, <, buflen);
if (*(sp+1) == NULL) {
break;
}
would = strlcat(o, sep, buflen);
VERIFY3U(would, <, buflen);
}
ASSERT3S(strlen(o), ==, totallen);
return (o);
}
static int
ztest_check_path(char *path)
{
struct stat s;
/* return true on success */
return (!stat(path, &s));
}
static void
ztest_get_zdb_bin(char *bin, int len)
{
char *zdb_path;
/*
* Try to use $ZDB and the in-tree zdb path. If not successful, just
* let popen search through PATH.
*/
if ((zdb_path = getenv("ZDB"))) {
strlcpy(bin, zdb_path, len); /* In env */
if (!ztest_check_path(bin)) {
ztest_dump_core = 0;
fatal(B_TRUE, "invalid ZDB '%s'", bin);
}
return;
}
VERIFY3P(realpath(getexecname(), bin), !=, NULL);
if (strstr(bin, ".libs/ztest")) {
strstr(bin, ".libs/ztest")[0] = '\0'; /* In-tree */
strcat(bin, "zdb");
if (ztest_check_path(bin))
return;
}
strcpy(bin, "zdb");
}
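/*
 * Walk the vdev tree, choosing a random concrete (non-removing,
 * non-detached) child at each level, and return the selected leaf.
 */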
static vdev_t *
ztest_random_concrete_vdev_leaf(vdev_t *vd)
{
if (vd == NULL)
return (NULL);
if (vd->vdev_children == 0)
return (vd);
vdev_t *eligible[vd->vdev_children];
int eligible_idx = 0, i;
for (i = 0; i < vd->vdev_children; i++) {
vdev_t *cvd = vd->vdev_child[i];
if (cvd->vdev_top->vdev_removing)
continue;
if (cvd->vdev_children > 0 ||
(vdev_is_concrete(cvd) && !cvd->vdev_detached)) {
eligible[eligible_idx++] = cvd;
}
}
VERIFY3S(eligible_idx, >, 0);
uint64_t child_no = ztest_random(eligible_idx);
return (ztest_random_concrete_vdev_leaf(eligible[child_no]));
}
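/*
 * Randomly start, cancel, or suspend initialization on a random leaf vdev.
 */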
void
ztest_initialize(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
int error = 0;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* Random leaf vdev */
vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
if (rand_vd == NULL) {
spa_config_exit(spa, SCL_VDEV, FTAG);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The random vdev we've selected may change as soon as we
* drop the spa_config_lock. We create local copies of things
* we're interested in.
*/
uint64_t guid = rand_vd->vdev_guid;
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_initialize_thread != NULL;
zfs_dbgmsg("vd %px, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS);
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *vdev_errlist = fnvlist_alloc();
fnvlist_add_uint64(vdev_guids, path, guid);
error = spa_vdev_initialize(spa, vdev_guids, cmd, vdev_errlist);
fnvlist_free(vdev_guids);
fnvlist_free(vdev_errlist);
switch (cmd) {
case POOL_INITIALIZE_CANCEL:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Cancel initialize %s", path);
if (!active)
(void) printf(" failed (no initialize active)");
(void) printf("\n");
}
break;
case POOL_INITIALIZE_START:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Start initialize %s", path);
if (active && error == 0)
(void) printf(" failed (already active)");
else if (error != 0)
(void) printf(" failed (error %d)", error);
(void) printf("\n");
}
break;
case POOL_INITIALIZE_SUSPEND:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Suspend initialize %s", path);
if (!active)
(void) printf(" failed (no initialize active)");
(void) printf("\n");
}
break;
}
free(path);
mutex_exit(&ztest_vdev_lock);
}
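/*
 * Randomly start, cancel, or suspend a TRIM on a random leaf vdev.
 */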
void
ztest_trim(ztest_ds_t *zd, uint64_t id)
{
(void) zd, (void) id;
spa_t *spa = ztest_spa;
int error = 0;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* Random leaf vdev */
vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
if (rand_vd == NULL) {
spa_config_exit(spa, SCL_VDEV, FTAG);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The random vdev we've selected may change as soon as we
* drop the spa_config_lock. We create local copies of things
* we're interested in.
*/
uint64_t guid = rand_vd->vdev_guid;
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_trim_thread != NULL;
zfs_dbgmsg("vd %p, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_TRIM_FUNCS);
uint64_t rate = 1 << ztest_random(30);
boolean_t partial = (ztest_random(5) > 0);
boolean_t secure = (ztest_random(5) > 0);
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *vdev_errlist = fnvlist_alloc();
fnvlist_add_uint64(vdev_guids, path, guid);
error = spa_vdev_trim(spa, vdev_guids, cmd, rate, partial,
secure, vdev_errlist);
fnvlist_free(vdev_guids);
fnvlist_free(vdev_errlist);
switch (cmd) {
case POOL_TRIM_CANCEL:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Cancel TRIM %s", path);
if (!active)
(void) printf(" failed (no TRIM active)");
(void) printf("\n");
}
break;
case POOL_TRIM_START:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Start TRIM %s", path);
if (active && error == 0)
(void) printf(" failed (already active)");
else if (error != 0)
(void) printf(" failed (error %d)", error);
(void) printf("\n");
}
break;
case POOL_TRIM_SUSPEND:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Suspend TRIM %s", path);
if (!active)
(void) printf(" failed (no TRIM active)");
(void) printf("\n");
}
break;
}
free(path);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify pool integrity by running zdb.
*/
static void
ztest_run_zdb(const char *pool)
{
int status;
char *bin;
char *zdb;
char *zbuf;
const int len = MAXPATHLEN + MAXNAMELEN + 20;
FILE *fp;
bin = umem_alloc(len, UMEM_NOFAIL);
zdb = umem_alloc(len, UMEM_NOFAIL);
zbuf = umem_alloc(1024, UMEM_NOFAIL);
ztest_get_zdb_bin(bin, len);
char **set_gvars_args = ztest_global_vars_to_zdb_args();
char *set_gvars_args_joined = join_strings(set_gvars_args, " ");
free(set_gvars_args);
size_t would = snprintf(zdb, len,
"%s -bcc%s%s -G -d -Y -e -y %s -p %s %s",
bin,
ztest_opts.zo_verbose >= 3 ? "s" : "",
ztest_opts.zo_verbose >= 4 ? "v" : "",
set_gvars_args_joined,
ztest_opts.zo_dir,
pool);
ASSERT3U(would, <, len);
free(set_gvars_args_joined);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Executing %s\n", zdb);
fp = popen(zdb, "r");
while (fgets(zbuf, 1024, fp) != NULL)
if (ztest_opts.zo_verbose >= 3)
(void) printf("%s", zbuf);
status = pclose(fp);
if (status == 0)
goto out;
ztest_dump_core = 0;
if (WIFEXITED(status))
fatal(B_FALSE, "'%s' exit code %d", zdb, WEXITSTATUS(status));
else
fatal(B_FALSE, "'%s' died with signal %d",
zdb, WTERMSIG(status));
out:
umem_free(bin, len);
umem_free(zdb, len);
umem_free(zbuf, 1024);
}
static void
ztest_walk_pool_directory(const char *header)
{
spa_t *spa = NULL;
if (ztest_opts.zo_verbose >= 6)
(void) puts(header);
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
if (ztest_opts.zo_verbose >= 6)
(void) printf("\t%s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
}
static void
ztest_spa_import_export(char *oldname, char *newname)
{
nvlist_t *config, *newconfig;
uint64_t pool_guid;
spa_t *spa;
int error;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("import/export: old = %s, new = %s\n",
oldname, newname);
}
/*
* Clean up from previous runs.
*/
(void) spa_destroy(newname);
/*
* Get the pool's configuration and guid.
*/
VERIFY0(spa_open(oldname, &spa, FTAG));
/*
* Kick off a scrub to tickle scrub/export races.
*/
if (ztest_random(2) == 0)
(void) spa_scan(spa, POOL_SCAN_SCRUB);
pool_guid = spa_guid(spa);
spa_close(spa, FTAG);
ztest_walk_pool_directory("pools before export");
/*
* Export it.
*/
VERIFY0(spa_export(oldname, &config, B_FALSE, B_FALSE));
ztest_walk_pool_directory("pools after export");
/*
* Try to import it.
*/
newconfig = spa_tryimport(config);
ASSERT3P(newconfig, !=, NULL);
fnvlist_free(newconfig);
/*
* Import it under the new name.
*/
error = spa_import(newname, config, NULL, 0);
if (error != 0) {
dump_nvlist(config, 0);
fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
oldname, newname, error);
}
ztest_walk_pool_directory("pools after import");
/*
* Try to import it again -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
/*
* Try to import it under a different name -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
/*
* Verify that the pool is no longer visible under the old name.
*/
VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
/*
* Verify that we can open and close the pool using the new name.
*/
VERIFY0(spa_open(newname, &spa, FTAG));
ASSERT3U(pool_guid, ==, spa_guid(spa));
spa_close(spa, FTAG);
fnvlist_free(config);
}
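/*
 * Clear vdev error state and resume I/O on a suspended pool.
 */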
static void
ztest_resume(spa_t *spa)
{
if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
(void) printf("resuming from suspended state\n");
spa_vdev_state_enter(spa, SCL_NONE);
vdev_clear(spa, NULL);
(void) spa_vdev_state_exit(spa, NULL, 0);
(void) zio_resume(spa);
}
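/*
 * Thread which periodically resumes a suspended pool and randomly toggles
 * the compressed ARC and ABD scatter tunables.
 */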
static __attribute__((noreturn)) void
ztest_resume_thread(void *arg)
{
spa_t *spa = arg;
while (!ztest_exiting) {
if (spa_suspended(spa))
ztest_resume(spa);
(void) poll(NULL, 0, 100);
/*
* Periodically change the zfs_compressed_arc_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_compressed_arc_enabled = ztest_random(2);
/*
* Periodically change the zfs_abd_scatter_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_abd_scatter_enabled = ztest_random(2);
}
thread_exit();
}
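/*
 * Deadman thread: abort the run if the pool suspends or the process fails
 * to make progress within the expected time.
 */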
static __attribute__((noreturn)) void
ztest_deadman_thread(void *arg)
{
ztest_shared_t *zs = arg;
spa_t *spa = ztest_spa;
hrtime_t delay, overdue, last_run = gethrtime();
delay = (zs->zs_thread_stop - zs->zs_thread_start) +
MSEC2NSEC(zfs_deadman_synctime_ms);
while (!ztest_exiting) {
/*
* Wait for the delay timer while checking occasionally
* if we should stop.
*/
if (gethrtime() < last_run + delay) {
(void) poll(NULL, 0, 1000);
continue;
}
/*
* If the pool is suspended then fail immediately. Otherwise,
* check to see if the pool is making any progress. If
* vdev_deadman() discovers that there haven't been any recent
* I/Os, then it will end up aborting the tests.
*/
if (spa_suspended(spa) || spa->spa_root_vdev == NULL) {
fatal(B_FALSE,
"aborting test after %lu seconds because "
"pool has transitioned to a suspended state.",
zfs_deadman_synctime_ms / 1000);
}
vdev_deadman(spa->spa_root_vdev, FTAG);
/*
* If the process doesn't complete within a grace period of
* zfs_deadman_synctime_ms over the expected finish time,
* then it may be hung and will be terminated.
*/
overdue = zs->zs_proc_stop + MSEC2NSEC(zfs_deadman_synctime_ms);
if (gethrtime() > overdue) {
fatal(B_FALSE,
"aborting test after %llu seconds because "
"the process is overdue for termination.",
(gethrtime() - zs->zs_proc_start) / NANOSEC);
}
(void) printf("ztest has been running for %lld seconds\n",
(gethrtime() - zs->zs_proc_start) / NANOSEC);
last_run = gethrtime();
delay = MSEC2NSEC(zfs_deadman_checktime_ms);
}
thread_exit();
}
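/*
 * Run a single test function zi_iters times and record its call count
 * and cumulative runtime.
 */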
static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
hrtime_t functime = gethrtime();
int i;
for (i = 0; i < zi->zi_iters; i++)
zi->zi_func(zd, id);
functime = gethrtime() - functime;
atomic_add_64(&zc->zc_count, 1);
atomic_add_64(&zc->zc_time, functime);
if (ztest_opts.zo_verbose >= 4)
(void) printf("%6.2f sec in %s\n",
(double)functime / NANOSEC, zi->zi_funcname);
}
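/*
 * Worker thread: repeatedly pick a random test function and run it until
 * the allotted time expires, a kill is scheduled, or ENOSPC persists.
 */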
static __attribute__((noreturn)) void
ztest_thread(void *arg)
{
int rand;
uint64_t id = (uintptr_t)arg;
ztest_shared_t *zs = ztest_shared;
uint64_t call_next;
hrtime_t now;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
while ((now = gethrtime()) < zs->zs_thread_stop) {
/*
* See if it's time to force a crash.
*/
if (now > zs->zs_thread_kill)
ztest_kill(zs);
/*
* If we're getting ENOSPC with some regularity, stop.
*/
if (zs->zs_enospc_count > 10)
break;
/*
* Pick a random function to execute.
*/
rand = ztest_random(ZTEST_FUNCS);
zi = &ztest_info[rand];
zc = ZTEST_GET_SHARED_CALLSTATE(rand);
call_next = zc->zc_next;
if (now >= call_next &&
atomic_cas_64(&zc->zc_next, call_next, call_next +
ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
ztest_execute(rand, zi, id);
}
}
thread_exit();
}
static void
ztest_dataset_name(char *dsname, const char *pool, int d)
{
(void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d);
}
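/*
 * Destroy dataset d, first cleaning up any clones and snapshots created
 * by the threads that operate on it.
 */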
static void
ztest_dataset_destroy(int d)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
int t;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
if (ztest_opts.zo_verbose >= 3)
(void) printf("Destroying %s to free up space\n", name);
/*
* Clean up any non-standard clones and snapshots. In general,
* ztest thread t operates on dataset (t % zopt_datasets),
* so there may be more than one thing to clean up.
*/
for (t = d; t < ztest_opts.zo_threads;
t += ztest_opts.zo_datasets)
ztest_dsl_dataset_cleanup(name, t);
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}
static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
uint64_t usedobjs, dirobjs, scratch;
/*
* ZTEST_DIROBJ is the object directory for the entire dataset.
* Therefore, the number of objects in use should equal the
* number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
* If not, we have an object leak.
*
* Note that we can only check this in ztest_dataset_open(),
* when the open-context and syncing-context values agree.
* That's because zap_count() returns the open-context value,
* while dmu_objset_space() returns the rootbp fill count.
*/
VERIFY0(zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
ASSERT3U(dirobjs + 1, ==, usedobjs);
}
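/*
 * Create (if necessary) and open dataset d, replay its ZIL, and verify
 * that no committed log records have been lost.
 */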
static int
ztest_dataset_open(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
objset_t *os;
zilog_t *zilog;
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
(void) pthread_rwlock_unlock(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, zd, &os));
(void) pthread_rwlock_unlock(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
if (zilog->zl_header->zh_claim_lr_seq != 0 &&
zilog->zl_header->zh_claim_lr_seq < committed_seq)
fatal(B_FALSE, "missing log records: "
"claimed %"PRIu64" < committed %"PRIu64"",
zilog->zl_header->zh_claim_lr_seq, committed_seq);
ztest_dataset_dirobj_verify(zd);
zil_replay(os, zd, ztest_replay_vector);
ztest_dataset_dirobj_verify(zd);
if (ztest_opts.zo_verbose >= 6)
(void) printf("%s replay %"PRIu64" blocks, "
"%"PRIu64" records, seq %"PRIu64"\n",
zd->zd_name,
zilog->zl_parse_blk_count,
zilog->zl_parse_lr_count,
zilog->zl_replaying_seq);
zilog = zil_open(os, ztest_get_data, NULL);
if (zilog->zl_replaying_seq != 0 &&
zilog->zl_replaying_seq < committed_seq)
fatal(B_FALSE, "missing log records: "
"replayed %"PRIu64" < committed %"PRIu64"",
zilog->zl_replaying_seq, committed_seq);
return (0);
}
static void
ztest_dataset_close(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
zil_close(zd->zd_zilog);
dmu_objset_disown(zd->zd_os, B_TRUE, zd);
ztest_zd_fini(zd);
}
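/*
 * dmu_objset_find() callback: own each dataset, replay its ZIL, and
 * report replay statistics at high verbosity.
 */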
static int
ztest_replay_zil_cb(const char *name, void *arg)
{
(void) arg;
objset_t *os;
ztest_ds_t *zdtmp;
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_ANY, B_TRUE,
B_TRUE, FTAG, &os));
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
if (dmu_objset_zil(os)->zl_parse_lr_count != 0 &&
ztest_opts.zo_verbose >= 6) {
zilog_t *zilog = dmu_objset_zil(os);
(void) printf("%s replay %"PRIu64" blocks, "
"%"PRIu64" records, seq %"PRIu64"\n",
name,
zilog->zl_parse_blk_count,
zilog->zl_parse_lr_count,
zilog->zl_replaying_seq);
}
umem_free(zdtmp, sizeof (ztest_ds_t));
dmu_objset_disown(os, B_TRUE, FTAG);
return (0);
}
static void
ztest_freeze(void)
{
ztest_ds_t *zd = &ztest_ds[0];
spa_t *spa;
int numloops = 0;
if (ztest_opts.zo_verbose >= 3)
(void) printf("testing spa_freeze()...\n");
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
VERIFY0(ztest_dataset_open(0));
ztest_spa = spa;
/*
* Force the first log block to be transactionally allocated.
* We have to do this before we freeze the pool -- otherwise
* the log chain won't be anchored.
*/
while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
ztest_dmu_object_alloc_free(zd, 0);
zil_commit(zd->zd_zilog, 0);
}
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Freeze the pool. This stops spa_sync() from doing anything,
* so that the only way to record changes from now on is the ZIL.
*/
spa_freeze(spa);
/*
* Because it is hard to predict how much space a write will actually
* require beforehand, we leave ourselves some fudge space to write over
* capacity.
*/
uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2;
/*
* Run tests that generate log records but don't alter the pool config
* or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
* We do a txg_wait_synced() after each iteration to force the txg
* to increase well beyond the last synced value in the uberblock.
* The ZIL should be OK with that.
*
* Run a random number of times less than zo_maxloops and ensure we do
* not run out of space on the pool.
*/
while (ztest_random(10) != 0 &&
numloops++ < ztest_opts.zo_maxloops &&
metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) {
ztest_od_t od;
ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE));
ztest_io(zd, od.od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
txg_wait_synced(spa_get_dsl(spa), 0);
}
/*
* Commit all of the changes we just generated.
*/
zil_commit(zd->zd_zilog, 0);
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Close our dataset and close the pool.
*/
ztest_dataset_close(0);
spa_close(spa, FTAG);
kernel_fini();
/*
* Open and close the pool and dataset to induce log replay.
*/
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
ASSERT3U(spa_freeze_txg(spa), ==, UINT64_MAX);
VERIFY0(ztest_dataset_open(0));
ztest_spa = spa;
txg_wait_synced(spa_get_dsl(spa), 0);
ztest_dataset_close(0);
ztest_reguid(NULL, 0);
spa_close(spa, FTAG);
kernel_fini();
}
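/*
 * Locate the pool's configuration under zo_dir and import it.
 */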
static void
ztest_import_impl(void)
{
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
int nsearch = 1;
char *searchdirs[nsearch];
int flags = ZFS_IMPORT_MISSING_LOG;
searchdirs[0] = ztest_opts.zo_dir;
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_FALSE;
VERIFY0(zpool_find_config(NULL, ztest_opts.zo_pool, &cfg, &args,
&libzpool_config_ops));
VERIFY0(spa_import(ztest_opts.zo_pool, cfg, NULL, flags));
fnvlist_free(cfg);
}
/*
* Import a storage pool with the given name.
*/
static void
ztest_import(ztest_shared_t *zs)
{
spa_t *spa;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
ztest_import_impl();
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
/*
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
spa_t *spa;
objset_t *os;
kthread_t *resume_thread, *deadman_thread;
kthread_t **run_threads;
uint64_t object;
int error;
int t, d;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
zs->zs_thread_kill = zs->zs_thread_stop;
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
* Open our pool. It may need to be imported first depending on
* what tests were running when the previous pass was terminated.
*/
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
error = spa_open(ztest_opts.zo_pool, &spa, FTAG);
if (error) {
VERIFY3S(error, ==, ENOENT);
ztest_import_impl();
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
}
metaslab_preload_limit = ztest_random(20) + 1;
ztest_spa = spa;
VERIFY0(vdev_raidz_impl_set("cycle"));
dmu_objset_stats_t dds;
VERIFY0(ztest_dmu_objset_own(ztest_opts.zo_pool,
DMU_OST_ANY, B_TRUE, B_TRUE, FTAG, &os));
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
zs->zs_guid = dds.dds_guid;
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Create a thread to periodically resume suspended I/O.
*/
resume_thread = thread_create(NULL, 0, ztest_resume_thread,
spa, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
/*
* Create a deadman thread and set to panic if we hang.
*/
deadman_thread = thread_create(NULL, 0, ztest_deadman_thread,
zs, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
* This hits all edge cases, including zero and the max.
*/
for (t = 0; t < 64; t++) {
for (d = -5; d <= 5; d++) {
error = dmu_object_info(spa->spa_meta_objset,
(1ULL << t) + d, NULL);
ASSERT(error == 0 || error == ENOENT ||
error == EINVAL);
}
}
/*
* If we got any ENOSPC errors on the previous run, destroy something.
*/
if (zs->zs_enospc_count != 0) {
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
/*
* If we were in the middle of ztest_device_removal() and were killed
* we need to ensure the removal and scrub complete before running
* any tests that check ztest_device_removal_active. The removal will
* be restarted automatically when the spa is opened, but we need to
* initiate the scrub manually if it is not already in progress. Note
* that we always run the scrub whenever an indirect vdev exists
* because we have no way of knowing for sure if ztest_device_removal()
* fully completed its scrub before the pool was reimported.
*/
if (spa->spa_removing_phys.sr_state == DSS_SCANNING ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
error = ztest_scrub_impl(spa);
if (error == EBUSY)
error = 0;
ASSERT0(error);
}
run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
/*
* Replay all logs of all datasets in the pool. This is primarily for
* temporary datasets which wouldn't otherwise get replayed, which
* can trigger failures when attempting to offline a SLOG in
* ztest_fault_inject().
*/
(void) dmu_objset_find(ztest_opts.zo_pool, ztest_replay_zil_cb,
NULL, DS_FIND_CHILDREN);
/*
* Kick off all the tests that run in parallel.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) {
umem_free(run_threads, ztest_opts.zo_threads *
sizeof (kthread_t *));
return;
}
run_threads[t] = thread_create(NULL, 0, ztest_thread,
(void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE,
defclsyspri);
}
/*
* Wait for all of the tests to complete.
*/
for (t = 0; t < ztest_opts.zo_threads; t++)
VERIFY0(thread_join(run_threads[t]));
/*
* Close all datasets. This must be done after all the threads
* are joined so we can be sure none of the datasets are in-use
* by any of the threads.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
umem_free(run_threads, ztest_opts.zo_threads * sizeof (kthread_t *));
/* Kill the resume and deadman threads */
ztest_exiting = B_TRUE;
VERIFY0(thread_join(resume_thread));
VERIFY0(thread_join(deadman_thread));
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
for (object = 1; object < 50; object++) {
dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20,
ZIO_PRIORITY_SYNC_READ);
}
/* Verify that at least one commit cb was called in a timely fashion */
if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
VERIFY0(zc_min_txg_delay);
spa_close(spa, FTAG);
/*
* Verify that we can loop over all pools.
*/
mutex_enter(&spa_namespace_lock);
for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
if (ztest_opts.zo_verbose > 3)
(void) printf("spa_next: found %s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
/*
* Verify that we can export the pool and reimport it under a
* different name.
*/
if ((ztest_random(2) == 0) && !ztest_opts.zo_mmp_test) {
char name[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s_import",
ztest_opts.zo_pool);
ztest_spa_import_export(ztest_opts.zo_pool, name);
ztest_spa_import_export(name, ztest_opts.zo_pool);
}
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
mutex_destroy(&zcl.zcl_callbacks_lock);
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
static void
print_time(hrtime_t t, char *timebuf)
{
hrtime_t s = t / NANOSEC;
hrtime_t m = s / 60;
hrtime_t h = m / 60;
hrtime_t d = h / 24;
s -= m * 60;
m -= h * 60;
h -= d * 24;
timebuf[0] = '\0';
if (d)
(void) sprintf(timebuf,
"%llud%02lluh%02llum%02llus", d, h, m, s);
else if (h)
(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
else if (m)
(void) sprintf(timebuf, "%llum%02llus", m, s);
else
(void) sprintf(timebuf, "%llus", s);
}
static nvlist_t *
make_random_props(void)
{
nvlist_t *props;
props = fnvlist_alloc();
if (ztest_random(2) == 0)
return (props);
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1);
return (props);
}
/*
* Create a storage pool with the given name and initial vdev size.
* Then test spa_freeze() functionality.
*/
static void
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
int i;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
/*
* Create the storage pool.
*/
(void) spa_destroy(ztest_opts.zo_pool);
ztest_shared->zs_vdev_next_leaf = 0;
zs->zs_splits = 0;
zs->zs_mirrors = ztest_opts.zo_mirrors;
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
props = make_random_props();
/*
* We don't expect the pool to suspend unless maxfaults == 0,
* in which case ztest_fault_inject() temporarily takes away
* the only valid replica.
*/
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
MAXFAULTS(zs) ? ZIO_FAILURE_MODE_PANIC : ZIO_FAILURE_MODE_WAIT);
for (i = 0; i < SPA_FEATURES; i++) {
char *buf;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
/*
* 75% chance of using the log space map feature. We want ztest
* to exercise both the code paths that use the log space map
* feature and the ones that don't.
*/
if (i == SPA_FEATURE_LOG_SPACEMAP && ztest_random(4) == 0)
continue;
VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
spa_feature_table[i].fi_uname));
fnvlist_add_uint64(props, buf, 0);
free(buf);
}
VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL, NULL));
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
static void
setup_data_fd(void)
{
static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
ztest_fd_data = mkstemp(ztest_name_data);
ASSERT3S(ztest_fd_data, >=, 0);
(void) unlink(ztest_name_data);
}
static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
int size;
size = hdr->zh_hdr_size;
size += hdr->zh_opts_size;
size += hdr->zh_size;
size += hdr->zh_stats_size * hdr->zh_stats_count;
size += hdr->zh_ds_size * hdr->zh_ds_count;
return (size);
}
static void
setup_hdr(void)
{
int size;
ztest_shared_hdr_t *hdr;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
VERIFY0(ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
hdr->zh_size = sizeof (ztest_shared_t);
hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
hdr->zh_stats_count = ZTEST_FUNCS;
hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
hdr->zh_ds_count = ztest_opts.zo_datasets;
size = shared_data_size(hdr);
VERIFY0(ftruncate(ztest_fd_data, size));
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}
static void
setup_data(void)
{
int size, offset;
ztest_shared_hdr_t *hdr;
uint8_t *buf;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
size = shared_data_size(hdr);
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
buf = (uint8_t *)hdr;
offset = hdr->zh_hdr_size;
ztest_shared_opts = (void *)&buf[offset];
offset += hdr->zh_opts_size;
ztest_shared = (void *)&buf[offset];
offset += hdr->zh_size;
ztest_shared_callstate = (void *)&buf[offset];
offset += hdr->zh_stats_size * hdr->zh_stats_count;
ztest_shared_ds = (void *)&buf[offset];
}
static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
pid_t pid;
int status;
char *cmdbuf = NULL;
pid = fork();
if (cmd == NULL) {
cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
cmd = cmdbuf;
}
if (pid == -1)
fatal(B_TRUE, "fork failed");
if (pid == 0) { /* child */
char fd_data_str[12];
VERIFY3S(11, >=,
snprintf(fd_data_str, 12, "%d", ztest_fd_data));
VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
if (libpath != NULL) {
const char *curlp = getenv("LD_LIBRARY_PATH");
if (curlp == NULL)
VERIFY0(setenv("LD_LIBRARY_PATH", libpath, 1));
else {
char *newlp = NULL;
VERIFY3S(-1, !=,
asprintf(&newlp, "%s:%s", libpath, curlp));
VERIFY0(setenv("LD_LIBRARY_PATH", newlp, 1));
+ free(newlp);
}
}
(void) execl(cmd, cmd, (char *)NULL);
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "exec failed: %s", cmd);
}
if (cmdbuf != NULL) {
umem_free(cmdbuf, MAXPATHLEN);
cmd = NULL;
}
while (waitpid(pid, &status, 0) != pid)
continue;
if (statusp != NULL)
*statusp = status;
if (WIFEXITED(status)) {
if (WEXITSTATUS(status) != 0) {
(void) fprintf(stderr, "child exited with code %d\n",
WEXITSTATUS(status));
exit(2);
}
return (B_FALSE);
} else if (WIFSIGNALED(status)) {
if (!ignorekill || WTERMSIG(status) != SIGKILL) {
(void) fprintf(stderr, "child died with signal %d\n",
WTERMSIG(status));
exit(3);
}
return (B_TRUE);
} else {
(void) fprintf(stderr, "something strange happened to child\n");
exit(4);
}
}
static void
ztest_run_init(void)
{
int i;
ztest_shared_t *zs = ztest_shared;
/*
* Blow away any existing copy of zpool.cache
*/
(void) remove(spa_config_path);
if (ztest_opts.zo_init == 0) {
if (ztest_opts.zo_verbose >= 1)
(void) printf("Importing pool %s\n",
ztest_opts.zo_pool);
ztest_import(zs);
return;
}
/*
* Create and initialize our storage pool.
*/
for (i = 1; i <= ztest_opts.zo_init; i++) {
memset(zs, 0, sizeof (*zs));
if (ztest_opts.zo_verbose >= 3 &&
ztest_opts.zo_init != 1) {
(void) printf("ztest_init(), pass %d\n", i);
}
ztest_init(zs);
}
}
int
main(int argc, char **argv)
{
int kills = 0;
int iters = 0;
int older = 0;
int newer = 0;
ztest_shared_t *zs;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
char timebuf[100];
char numbuf[NN_NUMBUF_SZ];
char *cmd;
boolean_t hasalt;
int f, err;
char *fd_data_str = getenv("ZTEST_FD_DATA");
struct sigaction action;
(void) setvbuf(stdout, NULL, _IOLBF, 0);
dprintf_setup(&argc, argv);
zfs_deadman_synctime_ms = 300000;
zfs_deadman_checktime_ms = 30000;
/*
* As two-word space map entries may not come up often (especially
* if pool and vdev sizes are small) we want to force at least some
 * of them so the feature gets tested.
*/
zfs_force_some_double_word_sm_entries = B_TRUE;
/*
* Verify that even extensively damaged split blocks with many
* segments can be reconstructed in a reasonable amount of time
* when reconstruction is known to be possible.
*
* Note: the lower this value is, the more damage we inflict, and
* the more time ztest spends in recovering that damage. We chose
* to induce damage 1/100th of the time so recovery is tested but
* not so frequently that ztest doesn't get to test other code paths.
*/
zfs_reconstruct_indirect_damage_fraction = 100;
action.sa_handler = sig_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
if (sigaction(SIGSEGV, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGSEGV: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
if (sigaction(SIGABRT, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGABRT: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
/*
* Force random_get_bytes() to use /dev/urandom in order to prevent
* ztest from needlessly depleting the system entropy pool.
*/
random_path = "/dev/urandom";
ztest_fd_rand = open(random_path, O_RDONLY | O_CLOEXEC);
ASSERT3S(ztest_fd_rand, >=, 0);
if (!fd_data_str) {
process_options(argc, argv);
setup_data_fd();
setup_hdr();
setup_data();
memcpy(ztest_shared_opts, &ztest_opts,
sizeof (*ztest_shared_opts));
} else {
ztest_fd_data = atoi(fd_data_str);
setup_data();
memcpy(&ztest_opts, ztest_shared_opts, sizeof (ztest_opts));
}
ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
err = ztest_set_global_vars();
if (err != 0 && !fd_data_str) {
/* error message done by ztest_set_global_vars */
exit(EXIT_FAILURE);
} else {
/* children should not be spawned if setting gvars fails */
VERIFY3S(err, ==, 0);
}
/* Override location of zpool.cache */
VERIFY3S(asprintf((char **)&spa_config_path, "%s/zpool.cache",
ztest_opts.zo_dir), !=, -1);
ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
UMEM_NOFAIL);
zs = ztest_shared;
if (fd_data_str) {
metaslab_force_ganging = ztest_opts.zo_metaslab_force_ganging;
metaslab_df_alloc_threshold =
zs->zs_metaslab_df_alloc_threshold;
if (zs->zs_do_init)
ztest_run_init();
else
ztest_run(zs);
exit(0);
}
hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
if (ztest_opts.zo_verbose >= 1) {
(void) printf("%"PRIu64" vdevs, %d datasets, %d threads,"
"%d %s disks, %"PRIu64" seconds...\n\n",
ztest_opts.zo_vdevs,
ztest_opts.zo_datasets,
ztest_opts.zo_threads,
ztest_opts.zo_raid_children,
ztest_opts.zo_raid_type,
ztest_opts.zo_time);
}
cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
(void) strlcpy(cmd, getexecname(), MAXNAMELEN);
zs->zs_do_init = B_TRUE;
if (strlen(ztest_opts.zo_alt_ztest) != 0) {
if (ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest for "
"initialization: %s\n", ztest_opts.zo_alt_ztest);
}
VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_FALSE, NULL));
} else {
VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
}
zs->zs_do_init = B_FALSE;
zs->zs_proc_start = gethrtime();
zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
zc->zc_next = UINT64_MAX;
else
zc->zc_next = zs->zs_proc_start +
ztest_random(2 * zi->zi_interval[0] + 1);
}
/*
* Run the tests in a loop. These tests include fault injection
* to verify that self-healing data works, and forced crashes
* to verify that we never lose on-disk consistency.
*/
while (gethrtime() < zs->zs_proc_stop) {
int status;
boolean_t killed;
/*
* Initialize the workload counters for each function.
*/
for (f = 0; f < ZTEST_FUNCS; f++) {
zc = ZTEST_GET_SHARED_CALLSTATE(f);
zc->zc_count = 0;
zc->zc_time = 0;
}
/* Set the allocation switch size */
zs->zs_metaslab_df_alloc_threshold =
ztest_random(zs->zs_metaslab_sz / 4) + 1;
if (!hasalt || ztest_random(2) == 0) {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing newer ztest: %s\n",
cmd);
}
newer++;
killed = exec_child(cmd, NULL, B_TRUE, &status);
} else {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest: %s\n",
ztest_opts.zo_alt_ztest);
}
older++;
killed = exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_TRUE, &status);
}
if (killed)
kills++;
iters++;
if (ztest_opts.zo_verbose >= 1) {
hrtime_t now = gethrtime();
now = MIN(now, zs->zs_proc_stop);
print_time(zs->zs_proc_stop - now, timebuf);
nicenum(zs->zs_space, numbuf, sizeof (numbuf));
(void) printf("Pass %3d, %8s, %3"PRIu64" ENOSPC, "
"%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
iters,
WIFEXITED(status) ? "Complete" : "SIGKILL",
zs->zs_enospc_count,
100.0 * zs->zs_alloc / zs->zs_space,
numbuf,
100.0 * (now - zs->zs_proc_start) /
(ztest_opts.zo_time * NANOSEC), timebuf);
}
if (ztest_opts.zo_verbose >= 2) {
(void) printf("\nWorkload summary:\n\n");
(void) printf("%7s %9s %s\n",
"Calls", "Time", "Function");
(void) printf("%7s %9s %s\n",
"-----", "----", "--------");
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
print_time(zc->zc_time, timebuf);
(void) printf("%7"PRIu64" %9s %s\n",
zc->zc_count, timebuf,
zi->zi_funcname);
}
(void) printf("\n");
}
if (!ztest_opts.zo_mmp_test)
ztest_run_zdb(ztest_opts.zo_pool);
}
if (ztest_opts.zo_verbose >= 1) {
if (hasalt) {
(void) printf("%d runs of older ztest: %s\n", older,
ztest_opts.zo_alt_ztest);
(void) printf("%d runs of newer ztest: %s\n", newer,
cmd);
}
(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
kills, iters - kills, (100.0 * kills) / MAX(1, iters));
}
umem_free(cmd, MAXNAMELEN);
return (0);
}
diff --git a/sys/contrib/openzfs/config/kernel-fadvise.m4 b/sys/contrib/openzfs/config/kernel-fadvise.m4
new file mode 100644
index 000000000000..08912de16ed8
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-fadvise.m4
@@ -0,0 +1,23 @@
+dnl #
+dnl # Linux 4.19 API
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_FADVISE], [
+ ZFS_LINUX_TEST_SRC([file_fadvise], [
+ #include <linux/fs.h>
+
+ static const struct file_operations
+ fops __attribute__ ((unused)) = {
+ .fadvise = NULL,
+ };
+ ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_FADVISE], [
+ AC_MSG_CHECKING([whether fops->fadvise() exists])
+ ZFS_LINUX_TEST_RESULT([file_fadvise], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_FILE_FADVISE, 1, [fops->fadvise() exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
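For context, the HAVE_FILE_FADVISE define produced by this check is the kind of guard a consumer would use when wiring a handler into its file_operations. A minimal C sketch, assuming a hypothetical zpl_fadvise() handler (the actual hook-up is not part of this hunk):

	#include <linux/fs.h>

	/* Hypothetical handler; the name and body are illustrative only. */
	static int
	zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
	{
		(void) filp; (void) offset; (void) len; (void) advice;
		return (0);
	}

	const struct file_operations zpl_file_operations = {
	#ifdef HAVE_FILE_FADVISE
		.fadvise	= zpl_fadvise,
	#endif
	};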
diff --git a/sys/contrib/openzfs/config/kernel-generic_fadvise.m4 b/sys/contrib/openzfs/config/kernel-generic_fadvise.m4
new file mode 100644
index 000000000000..8d122064b229
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-generic_fadvise.m4
@@ -0,0 +1,27 @@
+dnl #
+dnl # 5.3 API change
+dnl # The generic_fadvise() function has been present since the 4.19 kernel,
+dnl # but it was not exported until Linux 5.3.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_FADVISE], [
+ ZFS_LINUX_TEST_SRC([generic_fadvise], [
+ #include <linux/fs.h>
+ ], [
+ struct file *fp __attribute__ ((unused)) = NULL;
+ loff_t offset __attribute__ ((unused)) = 0;
+ loff_t len __attribute__ ((unused)) = 0;
+ int advise __attribute__ ((unused)) = 0;
+ generic_fadvise(fp, offset, len, advise);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_GENERIC_FADVISE], [
+ AC_MSG_CHECKING([whether generic_fadvise() is available])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([generic_fadvise],
+ [generic_fadvise], [mm/fadvise.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_GENERIC_FADVISE, 1, [yes])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
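Continuing the sketch above: once HAVE_GENERIC_FADVISE is defined, the handler can simply defer to the kernel's exported implementation; otherwise some fallback is needed. The fallback shown here (a no-op) is an assumption for illustration, not what the actual zpl code does:

	static int
	zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
	{
	#if defined(HAVE_GENERIC_FADVISE)
		/* Let the kernel interpret the POSIX_FADV_* hint. */
		return (generic_fadvise(filp, offset, len, advice));
	#else
		/* Illustrative fallback only: treat the hint as a no-op. */
		(void) filp; (void) offset; (void) len; (void) advice;
		return (0);
	#endif
	}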
diff --git a/sys/contrib/openzfs/config/kernel-xattr-handler.m4 b/sys/contrib/openzfs/config/kernel-xattr-handler.m4
index 00b1e74a9ccb..b6cbfa155007 100644
--- a/sys/contrib/openzfs/config/kernel-xattr-handler.m4
+++ b/sys/contrib/openzfs/config/kernel-xattr-handler.m4
@@ -1,425 +1,452 @@
dnl #
dnl # 2.6.35 API change,
dnl # The 'struct xattr_handler' was constified in the generic
dnl # super_block structure.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_CONST_XATTR_HANDLER], [
ZFS_LINUX_TEST_SRC([const_xattr_handler], [
#include <linux/fs.h>
#include <linux/xattr.h>
const struct xattr_handler xattr_test_handler = {
.prefix = "test",
.get = NULL,
.set = NULL,
};
const struct xattr_handler *xattr_handlers[] = {
&xattr_test_handler,
};
const struct super_block sb __attribute__ ((unused)) = {
.s_xattr = xattr_handlers,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_CONST_XATTR_HANDLER], [
AC_MSG_CHECKING([whether super_block uses const struct xattr_handler])
ZFS_LINUX_TEST_RESULT([const_xattr_handler], [
AC_MSG_RESULT([yes])
],[
ZFS_LINUX_TEST_ERROR([const xattr_handler])
])
])
dnl #
dnl # 4.5 API change,
dnl # struct xattr_handler added new member "name".
dnl # An xattr_handler which matches the whole name rather than a prefix should use
dnl # "name" instead of "prefix", e.g. "system.posix_acl_access"
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_NAME], [
ZFS_LINUX_TEST_SRC([xattr_handler_name], [
#include <linux/xattr.h>
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_NAME], [
AC_MSG_CHECKING([whether xattr_handler has name])
ZFS_LINUX_TEST_RESULT([xattr_handler_name], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_HANDLER_NAME, 1,
[xattr_handler has name])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Supported xattr handler get() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_GET], [
ZFS_LINUX_TEST_SRC([xattr_handler_get_dentry_inode], [
#include <linux/xattr.h>
int get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_get_xattr_handler], [
#include <linux/xattr.h>
int get(const struct xattr_handler *handler,
struct dentry *dentry, const char *name,
void *buffer, size_t size) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_get_dentry], [
#include <linux/xattr.h>
int get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int handler_flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
+
+ ZFS_LINUX_TEST_SRC([xattr_handler_get_dentry_inode_flags], [
+ #include <linux/xattr.h>
+
+ int get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer,
+ size_t size, int flags) { return 0; }
+ static const struct xattr_handler
+ xops __attribute__ ((unused)) = {
+ .get = get,
+ };
+ ],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_GET], [
dnl #
dnl # 4.7 API change,
dnl # The xattr_handler->get() callback was changed to take both
dnl # dentry and inode.
dnl #
AC_MSG_CHECKING([whether xattr_handler->get() wants dentry and inode])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_dentry_inode], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_DENTRY_INODE, 1,
[xattr_handler->get() wants both dentry and inode])
],[
dnl #
dnl # 4.4 API change,
dnl # The xattr_handler->get() callback was changed to take an
dnl # xattr_handler, the handler_flags argument was removed, and
dnl # the flags should now be accessed via handler->flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->get() wants xattr_handler])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_xattr_handler], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_HANDLER, 1,
[xattr_handler->get() wants xattr_handler])
],[
dnl #
dnl # 2.6.33 API change,
dnl # The xattr_handler->get() callback was changed
dnl # to take a dentry instead of an inode, and a
dnl # handler_flags argument was added.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->get() wants dentry])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_DENTRY, 1,
[xattr_handler->get() wants dentry])
],[
- ZFS_LINUX_TEST_ERROR([xattr get()])
+ dnl #
+ dnl # Android API change,
+ dnl # The xattr_handler->get() callback was
+ dnl # changed to take dentry, inode and flags.
+ dnl #
+ AC_MSG_RESULT(no)
+ AC_MSG_CHECKING(
+ [whether xattr_handler->get() wants dentry and inode and flags])
+ ZFS_LINUX_TEST_RESULT([xattr_handler_get_dentry_inode_flags], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_XATTR_GET_DENTRY_INODE_FLAGS, 1,
+ [xattr_handler->get() wants dentry and inode and flags])
+ ],[
+ ZFS_LINUX_TEST_ERROR([xattr get()])
+ ])
])
])
])
])
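Because exactly one of the HAVE_XATTR_GET_* results above ends up defined, the consuming code typically selects the matching prototype for its get() callback with a preprocessor ladder. A sketch, using __zpl_xattr_get() as a stand-in name for the real handler:

	#include <linux/xattr.h>

	#if defined(HAVE_XATTR_GET_DENTRY_INODE)
	static int
	__zpl_xattr_get(const struct xattr_handler *handler, struct dentry *dentry,
	    struct inode *inode, const char *name, void *buffer, size_t size)
	#elif defined(HAVE_XATTR_GET_DENTRY_INODE_FLAGS)
	static int
	__zpl_xattr_get(const struct xattr_handler *handler, struct dentry *dentry,
	    struct inode *inode, const char *name, void *buffer, size_t size,
	    int flags)
	#elif defined(HAVE_XATTR_GET_HANDLER)
	static int
	__zpl_xattr_get(const struct xattr_handler *handler, struct dentry *dentry,
	    const char *name, void *buffer, size_t size)
	#elif defined(HAVE_XATTR_GET_DENTRY)
	static int
	__zpl_xattr_get(struct dentry *dentry, const char *name, void *buffer,
	    size_t size, int handler_flags)
	#endif
	{
		/* Shared body; only the detected prototype differs. */
		return (0);
	}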
dnl #
dnl # Supported xattr handler set() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_SET], [
ZFS_LINUX_TEST_SRC([xattr_handler_set_userns], [
#include <linux/xattr.h>
int set(const struct xattr_handler *handler,
struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_dentry_inode], [
#include <linux/xattr.h>
int set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_xattr_handler], [
#include <linux/xattr.h>
int set(const struct xattr_handler *handler,
struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_dentry], [
#include <linux/xattr.h>
int set(struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags,
int handler_flags) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_SET], [
dnl #
dnl # 5.12 API change,
dnl # The xattr_handler->set() callback was changed to 8 arguments, and
dnl # struct user_namespace* was inserted as arg #2
dnl #
AC_MSG_CHECKING([whether xattr_handler->set() wants dentry, inode, and user_namespace])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_USERNS, 1,
[xattr_handler->set() takes user_namespace])
],[
dnl #
dnl # 4.7 API change,
dnl # The xattr_handler->set() callback was changed to take both
dnl # dentry and inode.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether xattr_handler->set() wants dentry and inode])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry_inode], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_DENTRY_INODE, 1,
[xattr_handler->set() wants both dentry and inode])
],[
dnl #
dnl # 4.4 API change,
dnl # The xattr_handler->set() callback was changed to take an
dnl # xattr_handler, the handler_flags argument was removed, and
dnl # the flags should now be accessed via handler->flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->set() wants xattr_handler])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_xattr_handler], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_HANDLER, 1,
[xattr_handler->set() wants xattr_handler])
],[
dnl #
dnl # 2.6.33 API change,
dnl # The xattr_handler->set() callback was changed
dnl # to take a dentry instead of an inode, and a
dnl # handler_flags argument was added.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->set() wants dentry])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_DENTRY, 1,
[xattr_handler->set() wants dentry])
],[
ZFS_LINUX_TEST_ERROR([xattr set()])
])
])
])
])
])
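The set() side follows the same pattern; a shortened sketch showing only the two newest detected variants, again with a hypothetical __zpl_xattr_set() name:

	#include <linux/xattr.h>

	#if defined(HAVE_XATTR_SET_USERNS)
	static int
	__zpl_xattr_set(const struct xattr_handler *handler,
	    struct user_namespace *mnt_userns, struct dentry *dentry,
	    struct inode *inode, const char *name, const void *buffer,
	    size_t size, int flags)
	#elif defined(HAVE_XATTR_SET_DENTRY_INODE)
	static int
	__zpl_xattr_set(const struct xattr_handler *handler, struct dentry *dentry,
	    struct inode *inode, const char *name, const void *buffer,
	    size_t size, int flags)
	#endif
	{
		/* Shared body, as in the get() sketch above. */
		return (0);
	}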
dnl #
dnl # Supported xattr handler list() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_LIST], [
ZFS_LINUX_TEST_SRC([xattr_handler_list_simple], [
#include <linux/xattr.h>
bool list(struct dentry *dentry) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.list = list,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_list_xattr_handler], [
#include <linux/xattr.h>
size_t list(const struct xattr_handler *handler,
struct dentry *dentry, char *list, size_t list_size,
const char *name, size_t name_len) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.list = list,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_list_dentry], [
#include <linux/xattr.h>
size_t list(struct dentry *dentry,
char *list, size_t list_size,
const char *name, size_t name_len,
int handler_flags) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.list = list,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_LIST], [
dnl # 4.5 API change,
dnl # The xattr_handler->list() callback was changed to take only a
dnl # dentry, and it only needs to return whether the xattr is accessible.
AC_MSG_CHECKING([whether xattr_handler->list() wants simple])
ZFS_LINUX_TEST_RESULT([xattr_handler_list_simple], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_LIST_SIMPLE, 1,
[xattr_handler->list() wants simple])
],[
dnl #
dnl # 4.4 API change,
dnl # The xattr_handler->list() callback was changed to take an
dnl # xattr_handler, the handler_flags argument was removed,
dnl # and the flags should now be accessed via handler->flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->list() wants xattr_handler])
ZFS_LINUX_TEST_RESULT([xattr_handler_list_xattr_handler], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_LIST_HANDLER, 1,
[xattr_handler->list() wants xattr_handler])
],[
dnl #
dnl # 2.6.33 API change,
dnl # The xattr_handler->list() callback was changed
dnl # to take a dentry instead of an inode, and a
dnl # handler_flags argument was added.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->list() wants dentry])
ZFS_LINUX_TEST_RESULT([xattr_handler_list_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_LIST_DENTRY, 1,
[xattr_handler->list() wants dentry])
],[
ZFS_LINUX_TEST_ERROR([xattr list()])
])
])
])
])
dnl #
dnl # 3.7 API change,
dnl # The posix_acl_{from,to}_xattr functions gained a new
dnl # parameter: user_ns
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_FROM_XATTR_USERNS], [
ZFS_LINUX_TEST_SRC([posix_acl_from_xattr_userns], [
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
],[
posix_acl_from_xattr(&init_user_ns, NULL, 0);
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_FROM_XATTR_USERNS], [
AC_MSG_CHECKING([whether posix_acl_from_xattr() needs user_ns])
ZFS_LINUX_TEST_RESULT([posix_acl_from_xattr_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_FROM_XATTR_USERNS, 1,
[posix_acl_from_xattr() needs user_ns])
],[
ZFS_LINUX_TEST_ERROR([posix_acl_from_xattr()])
])
])
dnl #
dnl # 4.9 API change,
dnl # iops->{set,get,remove}xattr and generic_{set,get,remove}xattr are
dnl # removed. xattr operations will directly go through sb->s_xattr.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_SETXATTR], [
ZFS_LINUX_TEST_SRC([have_generic_setxattr], [
#include <linux/fs.h>
#include <linux/xattr.h>
static const struct inode_operations
iops __attribute__ ((unused)) = {
.setxattr = generic_setxattr
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_GENERIC_SETXATTR], [
AC_MSG_CHECKING([whether generic_setxattr() exists])
ZFS_LINUX_TEST_RESULT([have_generic_setxattr], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GENERIC_SETXATTR, 1,
[generic_setxattr() exists])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR], [
ZFS_AC_KERNEL_SRC_CONST_XATTR_HANDLER
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_NAME
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_GET
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_SET
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_LIST
ZFS_AC_KERNEL_SRC_POSIX_ACL_FROM_XATTR_USERNS
ZFS_AC_KERNEL_SRC_GENERIC_SETXATTR
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR], [
ZFS_AC_KERNEL_CONST_XATTR_HANDLER
ZFS_AC_KERNEL_XATTR_HANDLER_NAME
ZFS_AC_KERNEL_XATTR_HANDLER_GET
ZFS_AC_KERNEL_XATTR_HANDLER_SET
ZFS_AC_KERNEL_XATTR_HANDLER_LIST
ZFS_AC_KERNEL_POSIX_ACL_FROM_XATTR_USERNS
ZFS_AC_KERNEL_GENERIC_SETXATTR
])
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index 1f274cbe4f30..6aad2cf88e02 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,945 +1,949 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_CONFIG_DEFINED
ZFS_AC_MODULE_SYMVERS
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once, the compilation can be done in parallel, significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
+ ZFS_AC_KERNEL_SRC_FADVISE
+ ZFS_AC_KERNEL_SRC_GENERIC_FADVISE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_GENHD_FLAGS
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_PERMISSION
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_SRC_VFS_READ_FOLIO
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_READPAGES
ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SYSFS
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_SRC_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_SRC_ADD_DISK
ZFS_AC_KERNEL_SRC_KTHREAD
ZFS_AC_KERNEL_SRC_ZERO_PAGE
ZFS_AC_KERNEL_SRC___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_SRC_USER_NS_COMMON_INUM
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
+ ZFS_AC_KERNEL_FADVISE
+ ZFS_AC_KERNEL_GENERIC_FADVISE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_GENHD_FLAGS
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_PERMISSION
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_VFS_READ_FOLIO
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_READPAGES
ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SYSFS
ZFS_AC_KERNEL_SET_SPECIAL_STATE
ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_ADD_DISK
ZFS_AC_KERNEL_KTHREAD
ZFS_AC_KERNEL_ZERO_PAGE
ZFS_AC_KERNEL___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_USER_NS_COMMON_INUM
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
dnl # Most modern Linux distributions have separate locations for bare
dnl # source (source) and prebuilt (build) files. Additionally, there are
dnl # `source` and `build` symlinks in `/lib/modules/$(KERNEL_VERSION)`
dnl # pointing to them. The directory search order is now:
dnl #
dnl # - `configure` command line values if both `--with-linux` and
dnl # `--with-linux-obj` were defined
dnl #
dnl # - If only `--with-linux` was defined, `--with-linux-obj` is assumed
dnl # to have the same value as `--with-linux`
dnl #
dnl # - If neither `--with-linux` nor `--with-linux-obj` were defined
dnl # autodetection is used:
dnl #
dnl # - `/lib/modules/$(uname -r)/{source,build}` respectively, if they exist.
dnl #
dnl # - If only `/lib/modules/$(uname -r)/build` exists, it is assumed
dnl # to be both source and build directory.
dnl #
dnl # - The first directory in `/lib/modules` with the highest version
dnl # number according to `sort -V` which contains both `source` and
dnl # `build` symlinks/directories. If the module directory contains only
dnl # the `build` component, it is assumed to be both the source and build
dnl # directory.
dnl #
dnl # - Last resort: the first directory matching `/usr/src/kernels/*`
dnl # and `/usr/src/linux-*` with the highest version number according
dnl # to `sort -V` is assumed to be both source and build directory.
dnl #
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
AC_MSG_CHECKING([kernel source and build directories])
AS_IF([test -n "$kernelsrc" && test -z "$kernelbuild"], [
kernelbuild="$kernelsrc"
], [test -z "$kernelsrc"], [
AS_IF([test -e "/lib/modules/$(uname -r)/source" && \
test -e "/lib/modules/$(uname -r)/build"], [
src="/lib/modules/$(uname -r)/source"
build="/lib/modules/$(uname -r)/build"
], [test -e "/lib/modules/$(uname -r)/build"], [
build="/lib/modules/$(uname -r)/build"
src="$build"
], [
src=
for d in $(ls -1d /lib/modules/* 2>/dev/null | sort -Vr); do
if test -e "$d/source" && test -e "$d/build"; then
src="$d/source"
build="$d/build"
break
fi
if test -e "$d/build"; then
src="$d/build"
build="$d/build"
break
fi
done
# the least reliable method
if test -z "$src"; then
src=$(ls -1d /usr/src/kernels/* /usr/src/linux-* \
2>/dev/null | grep -v obj | sort -Vr | head -1)
build="$src"
fi
])
AS_IF([test -n "$src" && test -e "$src"], [
kernelsrc=$(readlink -e "$src")
], [
kernelsrc="[Not found]"
])
AS_IF([test -n "$build" && test -e "$build"], [
kernelbuild=$(readlink -e "$build")
], [
kernelbuild="[Not found]"
])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
AC_MSG_RESULT([done])
AC_MSG_CHECKING([kernel source directory])
AC_MSG_RESULT([$kernelsrc])
AC_MSG_CHECKING([kernel build directory])
AC_MSG_RESULT([$kernelbuild])
AS_IF([test ! -d "$kernelsrc" || test ! -d "$kernelbuild"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
*** location of the kernel source and build with the '--with-linux=PATH' and
*** '--with-linux-obj=PATH' options respectively.])
])
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && grep -qF UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && grep -qF UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && grep -qF UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
])
dnl #
dnl # Detect the QAT module to be built against; QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Then data written to this ZFS pool is automatically compressed by the
dnl # QAT accelerator, and decompressed by QAT when it is read back from the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and exists])
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
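To make the template concrete: combined with the file_fadvise test source added earlier in this diff (ZFS_LINUX_TEST_SRC passes "Dual BSD/GPL" as the license argument, and ZFS_LINUX_CONFTEST_C prepends confdefs.h), the generated build/file_fadvise/file_fadvise.c comes out roughly as:

	#include <linux/module.h>
	#include <linux/fs.h>

	static const struct file_operations
	fops __attribute__ ((unused)) = {
		.fadvise = NULL,
	};

	int
	main (void)
	{
		;
		return 0;
	}

	MODULE_DESCRIPTION("conftest");
	MODULE_AUTHOR(ZFS_META_AUTHOR);
	MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
	MODULE_LICENSE("Dual BSD/GPL");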
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
AC_ARG_VAR([KERNEL_CC], [C compiler for
building kernel modules])
AC_ARG_VAR([KERNEL_LD], [Linker for
building kernel modules])
AC_ARG_VAR([KERNEL_LLVM], [Binary option to
build kernel modules with LLVM/CLANG toolchain])
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC}
${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM}
CONFIG_MODULES=y CFLAGS_MODULE=-DCONFIG_MODULES
-C $LINUX_OBJ $ARCH_UM M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin)
dnl # fake the linking step and artificially create the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. because all of the test cases are compiled in parallel, they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]], [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko],
[$3], [$4], [$5])
])
diff --git a/sys/contrib/openzfs/contrib/coverity/model.c b/sys/contrib/openzfs/contrib/coverity/model.c
new file mode 100644
index 000000000000..d27abd038767
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/coverity/model.c
@@ -0,0 +1,422 @@
+/*
+ * Coverity Scan model
+ * https://scan.coverity.com/models
+ *
+ * This is a modeling file for Coverity Scan.
+ * Modeling helps to avoid false positives.
+ *
+ * - Modeling doesn't need full structs and typedefs. Rudimentary structs
+ * and similar types are sufficient.
+ * - An uninitialized local pointer is not an error. It signifies that the
+ * variable could be either NULL or have some data.
+ *
+ * Coverity Scan doesn't pick up modifications automatically. The model file
+ * must be uploaded by an admin in the analysis settings.
+ *
+ * Some of this initially cribbed from:
+ *
+ * https://github.com/kees/coverity-linux/blob/trunk/model.c
+ *
+ * The below model was based on the original model by Brian Behlendorf for the
+ * original zfsonlinux/zfs repository. Some inspiration was taken from
+ * kees/coverity-linux, specifically involving memory copies.
+ */
+
+#include <stdarg.h>
+
+#define UMEM_DEFAULT 0x0000 /* normal -- may fail */
+#define UMEM_NOFAIL 0x0100 /* Never fails */
+
+#define NULL (0)
+
+int condition0, condition1;
+
+void
+abort()
+{
+ __coverity_panic__();
+}
+
+void
+exit(int status)
+{
+ (void) status;
+
+ __coverity_panic__();
+}
+
+void
+_exit(int status)
+{
+ (void) status;
+
+ __coverity_panic__();
+}
+
+void
+zed_log_die(const char *fmt, ...)
+{
+ __coverity_format_string_sink__(fmt);
+ __coverity_panic__();
+}
+
+void
+panic(const char *fmt, ...)
+{
+ __coverity_format_string_sink__(fmt);
+ __coverity_panic__();
+}
+
+void
+vpanic(const char *fmt, va_list adx)
+{
+ (void) adx;
+
+ __coverity_format_string_sink__(fmt);
+ __coverity_panic__();
+}
+
+void
+uu_panic(const char *format, ...)
+{
+ __coverity_format_string_sink__(format);
+ __coverity_panic__();
+}
+
+int
+libspl_assertf(const char *file, const char *func, int line,
+ const char *format, ...)
+{
+ __coverity_format_string_sink__(format);
+ __coverity_panic__();
+}
+
+int
+ddi_copyin(const void *from, void *to, size_t len, int flags)
+{
+ __coverity_tainted_data_argument__(from);
+ __coverity_tainted_data_argument__(to);
+ __coverity_writeall__(to);
+}
+
+void *
+memset(void *dst, int c, size_t len)
+{
+ __coverity_writeall__(dst);
+ return (dst);
+}
+
+void *
+memmove(void *dst, void *src, size_t len)
+{
+ __coverity_writeall__(dst);
+ return (dst);
+}
+
+void *
+memcpy(void *dst, void *src, size_t len)
+{
+ __coverity_writeall__(dst);
+ return (dst);
+}
+
+void *
+umem_alloc_aligned(size_t size, size_t align, int kmflags)
+{
+ (void) align;
+
+	if ((kmflags & UMEM_NOFAIL) == UMEM_NOFAIL)
+ return (__coverity_alloc__(size));
+ else if (condition0)
+ return (__coverity_alloc__(size));
+ else
+ return (NULL);
+}
+
+void *
+umem_alloc(size_t size, int kmflags)
+{
+ if ((kmflags & UMEM_NOFAIL) == UMEM_NOFAIL)
+ return (__coverity_alloc__(size));
+ else if (condition0)
+ return (__coverity_alloc__(size));
+ else
+ return (NULL);
+}
+
+void *
+umem_zalloc(size_t size, int kmflags)
+{
+ if ((kmflags & UMEM_NOFAIL) == UMEM_NOFAIL)
+ return (__coverity_alloc__(size));
+ else if (condition0)
+ return (__coverity_alloc__(size));
+ else
+ return (NULL);
+}
+
+void
+umem_free(void *buf, size_t size)
+{
+ (void) size;
+
+ __coverity_free__(buf);
+}
+
+void *
+spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
+{
+ (void) func;
+ (void) line;
+
+ if (condition1)
+ __coverity_sleep__();
+
+ if (fl == 0) {
+ return (__coverity_alloc__(sz));
+ } else if (condition0)
+ return (__coverity_alloc__(sz));
+ else
+ return (NULL);
+}
+
+void *
+spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
+{
+ (void) func;
+ (void) line;
+
+ if (condition1)
+ __coverity_sleep__();
+
+ if (fl == 0) {
+ return (__coverity_alloc__(sz));
+ } else if (condition0)
+ return (__coverity_alloc__(sz));
+ else
+ return (NULL);
+}
+
+void
+spl_kmem_free(const void *ptr, size_t sz)
+{
+ (void) sz;
+
+ __coverity_free__(ptr);
+}
+
+typedef struct {} spl_kmem_cache_t;
+
+void *
+spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
+{
+ (void) skc;
+
+ if (condition1)
+ __coverity_sleep__();
+
+ if (flags == 0) {
+ return (__coverity_alloc_nosize__());
+ } else if (condition0)
+ return (__coverity_alloc_nosize__());
+ else
+ return (NULL);
+}
+
+void
+spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
+{
+ (void) skc;
+
+ __coverity_free__(obj);
+}
+
+void
+malloc(size_t size)
+{
+ __coverity_alloc__(size);
+}
+
+void
+free(void *buf)
+{
+ __coverity_free__(buf);
+}
+
+int
+spl_panic(const char *file, const char *func, int line, const char *fmt, ...)
+{
+ __coverity_format_string_sink__(fmt);
+ __coverity_panic__();
+}
+
+int
+sched_yield(void)
+{
+ __coverity_sleep__();
+}
+
+typedef struct {} kmutex_t;
+typedef struct {} krwlock_t;
+typedef int krw_t;
+
+/*
+ * Coverity reportedly does not support macros, so this only works for
+ * userspace.
+ */
+
+void
+mutex_enter(kmutex_t *mp)
+{
+ if (condition0)
+ __coverity_sleep__();
+
+ __coverity_exclusive_lock_acquire__(mp);
+}
+
+int
+mutex_tryenter(kmutex_t *mp)
+{
+ if (condition0) {
+ __coverity_exclusive_lock_acquire__(mp);
+ return (1);
+ }
+
+ return (0);
+}
+
+void
+mutex_exit(kmutex_t *mp)
+{
+ __coverity_exclusive_lock_release__(mp);
+}
+
+void
+rw_enter(krwlock_t *rwlp, krw_t rw)
+{
+ (void) rw;
+
+ if (condition0)
+ __coverity_sleep__();
+
+ __coverity_recursive_lock_acquire__(rwlp);
+}
+
+void
+rw_exit(krwlock_t *rwlp)
+{
+ __coverity_recursive_lock_release__(rwlp);
+
+}
+
+int
+rw_tryenter(krwlock_t *rwlp, krw_t rw)
+{
+ if (condition0) {
+ __coverity_recursive_lock_acquire__(rwlp);
+ return (1);
+ }
+
+ return (0);
+}
+
+/* Thus, we fall back to the Linux kernel locks */
+struct {} mutex;
+struct {} rw_semaphore;
+
+void
+mutex_lock(struct mutex *lock)
+{
+ if (condition0) {
+ __coverity_sleep__();
+ }
+ __coverity_exclusive_lock_acquire__(lock);
+}
+
+void
+mutex_unlock(struct mutex *lock)
+{
+ __coverity_exclusive_lock_release__(lock);
+}
+
+void
+down_read(struct rw_semaphore *sem)
+{
+ if (condition0) {
+ __coverity_sleep__();
+ }
+ __coverity_recursive_lock_acquire__(sem);
+}
+
+void
+down_write(struct rw_semaphore *sem)
+{
+ if (condition0) {
+ __coverity_sleep__();
+ }
+ __coverity_recursive_lock_acquire__(sem);
+}
+
+int
+down_read_trylock(struct rw_semaphore *sem)
+{
+ if (condition0) {
+ __coverity_recursive_lock_acquire__(sem);
+ return (1);
+ }
+
+ return (0);
+}
+
+int
+down_write_trylock(struct rw_semaphore *sem)
+{
+ if (condition0) {
+ __coverity_recursive_lock_acquire__(sem);
+ return (1);
+ }
+
+ return (0);
+}
+
+void
+up_read(struct rw_semaphore *sem)
+{
+ __coverity_recursive_lock_release__(sem);
+}
+
+void
+up_write(struct rw_semaphore *sem)
+{
+ __coverity_recursive_lock_release__(sem);
+}
+
+int
+__cond_resched(void)
+{
+ if (condition0) {
+ __coverity_sleep__();
+ }
+}
+
+/*
+ * An endian-independent filesystem must support doing byte swaps on data. We
+ * attempt to suppress taint warnings, which are false positives for us.
+ */
+void
+byteswap_uint64_array(void *vbuf, size_t size)
+{
+ __coverity_tainted_data_sanitize__(vbuf);
+}
+
+void
+byteswap_uint32_array(void *vbuf, size_t size)
+{
+ __coverity_tainted_data_sanitize__(vbuf);
+}
+
+void
+byteswap_uint16_array(void *vbuf, size_t size)
+{
+ __coverity_tainted_data_sanitize__(vbuf);
+}
diff --git a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
index 6f95d468074f..c1001e6b81c2 100644
--- a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
+++ b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
@@ -1,845 +1,848 @@
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Copyright (c) 2020, Felix Dörre
* All rights reserved.
*/
#include <sys/dsl_crypt.h>
#include <sys/byteorder.h>
#include <libzfs.h>
#include <syslog.h>
#include <sys/zio_crypt.h>
#include <openssl/evp.h>
#define PAM_SM_AUTH
#define PAM_SM_PASSWORD
#define PAM_SM_SESSION
#include <security/pam_modules.h>
#if defined(__linux__)
#include <security/pam_ext.h>
#define MAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
#elif defined(__FreeBSD__)
#include <security/pam_appl.h>
static void
pam_syslog(pam_handle_t *pamh, int loglevel, const char *fmt, ...)
{
(void) pamh;
va_list args;
va_start(args, fmt);
vsyslog(loglevel, fmt, args);
va_end(args);
}
#define MAP_FLAGS MAP_PRIVATE | MAP_ANON | MAP_NOCORE
#endif
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/wait.h>
#include <pwd.h>
#include <sys/mman.h>
static const char PASSWORD_VAR_NAME[] = "pam_zfs_key_authtok";
static libzfs_handle_t *g_zfs;
static void destroy_pw(pam_handle_t *pamh, void *data, int errcode);
typedef int (*mlock_func_t) (const void *, size_t);
typedef struct {
size_t len;
char *value;
} pw_password_t;
/*
* Try to mlock(2) or munlock(2) addr while handling EAGAIN by retrying ten
* times and sleeping 10 milliseconds in between for a total of 0.1
* seconds. lock_func must point to either mlock(2) or munlock(2).
*/
static int
try_lock(mlock_func_t lock_func, const void *addr, size_t len)
{
int err;
int retries = 10;
useconds_t sleep_dur = 10 * 1000;
if ((err = (*lock_func)(addr, len)) != EAGAIN) {
return (err);
}
for (int i = retries; i > 0; --i) {
(void) usleep(sleep_dur);
if ((err = (*lock_func)(addr, len)) != EAGAIN) {
break;
}
}
return (err);
}
static pw_password_t *
alloc_pw_size(size_t len)
{
pw_password_t *pw = malloc(sizeof (pw_password_t));
if (!pw) {
return (NULL);
}
pw->len = len;
/*
* We use mmap(2) rather than malloc(3) since later on we mlock(2) the
* memory region. Since mlock(2) and munlock(2) operate on whole memory
* pages we should allocate a whole page here as mmap(2) does. Further
* this ensures that the addresses passed to mlock(2) and munlock(2) are
* on a page boundary as suggested by FreeBSD and required by some
* other implementations. Finally we avoid inadvertently munlocking
* memory mlocked by a concurrently running instance of us.
*/
pw->value = mmap(NULL, pw->len, PROT_READ | PROT_WRITE, MAP_FLAGS,
-1, 0);
if (pw->value == MAP_FAILED) {
free(pw);
return (NULL);
}
if (try_lock(mlock, pw->value, pw->len) != 0) {
(void) munmap(pw->value, pw->len);
free(pw);
return (NULL);
}
return (pw);
}
static pw_password_t *
alloc_pw_string(const char *source)
{
size_t len = strlen(source) + 1;
pw_password_t *pw = alloc_pw_size(len);
if (!pw) {
return (NULL);
}
memcpy(pw->value, source, pw->len);
return (pw);
}
static void
pw_free(pw_password_t *pw)
{
memset(pw->value, 0, pw->len);
if (try_lock(munlock, pw->value, pw->len) == 0) {
(void) munmap(pw->value, pw->len);
}
free(pw);
}
static pw_password_t *
pw_fetch(pam_handle_t *pamh)
{
const char *token;
if (pam_get_authtok(pamh, PAM_AUTHTOK, &token, NULL) != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR,
"couldn't get password from PAM stack");
return (NULL);
}
if (!token) {
pam_syslog(pamh, LOG_ERR,
"token from PAM stack is null");
return (NULL);
}
return (alloc_pw_string(token));
}
static const pw_password_t *
pw_fetch_lazy(pam_handle_t *pamh)
{
pw_password_t *pw = pw_fetch(pamh);
if (pw == NULL) {
return (NULL);
}
int ret = pam_set_data(pamh, PASSWORD_VAR_NAME, pw, destroy_pw);
if (ret != PAM_SUCCESS) {
pw_free(pw);
pam_syslog(pamh, LOG_ERR, "pam_set_data failed");
return (NULL);
}
return (pw);
}
static const pw_password_t *
pw_get(pam_handle_t *pamh)
{
const pw_password_t *authtok = NULL;
int ret = pam_get_data(pamh, PASSWORD_VAR_NAME,
(const void**)(&authtok));
if (ret == PAM_SUCCESS)
return (authtok);
if (ret == PAM_NO_MODULE_DATA)
return (pw_fetch_lazy(pamh));
pam_syslog(pamh, LOG_ERR, "password not available");
return (NULL);
}
static int
pw_clear(pam_handle_t *pamh)
{
int ret = pam_set_data(pamh, PASSWORD_VAR_NAME, NULL, NULL);
if (ret != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR, "clearing password failed");
return (-1);
}
return (0);
}
static void
destroy_pw(pam_handle_t *pamh, void *data, int errcode)
{
(void) pamh, (void) errcode;
if (data != NULL) {
pw_free((pw_password_t *)data);
}
}
static int
pam_zfs_init(pam_handle_t *pamh)
{
int error = 0;
if ((g_zfs = libzfs_init()) == NULL) {
error = errno;
pam_syslog(pamh, LOG_ERR, "Zfs initialization error: %s",
libzfs_error_init(error));
}
return (error);
}
static void
pam_zfs_free(void)
{
libzfs_fini(g_zfs);
}
static pw_password_t *
prepare_passphrase(pam_handle_t *pamh, zfs_handle_t *ds,
const char *passphrase, nvlist_t *nvlist)
{
pw_password_t *key = alloc_pw_size(WRAPPING_KEY_LEN);
if (!key) {
return (NULL);
}
uint64_t salt;
uint64_t iters;
if (nvlist != NULL) {
int fd = open("/dev/urandom", O_RDONLY);
if (fd < 0) {
pw_free(key);
return (NULL);
}
int bytes_read = 0;
char *buf = (char *)&salt;
size_t bytes = sizeof (uint64_t);
while (bytes_read < bytes) {
ssize_t len = read(fd, buf + bytes_read, bytes
- bytes_read);
if (len < 0) {
close(fd);
pw_free(key);
return (NULL);
}
bytes_read += len;
}
close(fd);
if (nvlist_add_uint64(nvlist,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt)) {
pam_syslog(pamh, LOG_ERR,
"failed to add salt to nvlist");
pw_free(key);
return (NULL);
}
iters = DEFAULT_PBKDF2_ITERATIONS;
if (nvlist_add_uint64(nvlist, zfs_prop_to_name(
ZFS_PROP_PBKDF2_ITERS), iters)) {
pam_syslog(pamh, LOG_ERR,
"failed to add iters to nvlist");
pw_free(key);
return (NULL);
}
} else {
salt = zfs_prop_get_int(ds, ZFS_PROP_PBKDF2_SALT);
iters = zfs_prop_get_int(ds, ZFS_PROP_PBKDF2_ITERS);
}
salt = LE_64(salt);
if (!PKCS5_PBKDF2_HMAC_SHA1((char *)passphrase,
strlen(passphrase), (uint8_t *)&salt,
sizeof (uint64_t), iters, WRAPPING_KEY_LEN,
(uint8_t *)key->value)) {
pam_syslog(pamh, LOG_ERR, "pbkdf failed");
pw_free(key);
return (NULL);
}
return (key);
}
static int
is_key_loaded(pam_handle_t *pamh, const char *ds_name)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
int keystatus = zfs_prop_get_int(ds, ZFS_PROP_KEYSTATUS);
zfs_close(ds);
return (keystatus != ZFS_KEYSTATUS_UNAVAILABLE);
}
static int
change_key(pam_handle_t *pamh, const char *ds_name,
const char *passphrase)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
nvlist_t *nvlist = fnvlist_alloc();
pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, nvlist);
if (key == NULL) {
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
if (nvlist_add_string(nvlist,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
"prompt")) {
pam_syslog(pamh, LOG_ERR, "nvlist_add failed for keylocation");
pw_free(key);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
if (nvlist_add_uint64(nvlist,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
ZFS_KEYFORMAT_PASSPHRASE)) {
pam_syslog(pamh, LOG_ERR, "nvlist_add failed for keyformat");
pw_free(key);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
int ret = lzc_change_key(ds_name, DCP_CMD_NEW_KEY, nvlist,
(uint8_t *)key->value, WRAPPING_KEY_LEN);
pw_free(key);
if (ret) {
pam_syslog(pamh, LOG_ERR, "change_key failed: %d", ret);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
nvlist_free(nvlist);
zfs_close(ds);
return (0);
}
static int
decrypt_mount(pam_handle_t *pamh, const char *ds_name,
const char *passphrase)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, NULL);
if (key == NULL) {
zfs_close(ds);
return (-1);
}
int ret = lzc_load_key(ds_name, B_FALSE, (uint8_t *)key->value,
WRAPPING_KEY_LEN);
pw_free(key);
if (ret) {
pam_syslog(pamh, LOG_ERR, "load_key failed: %d", ret);
zfs_close(ds);
return (-1);
}
ret = zfs_mount(ds, NULL, 0);
if (ret) {
pam_syslog(pamh, LOG_ERR, "mount failed: %d", ret);
zfs_close(ds);
return (-1);
}
zfs_close(ds);
return (0);
}
static int
unmount_unload(pam_handle_t *pamh, const char *ds_name)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
int ret = zfs_unmount(ds, NULL, 0);
if (ret) {
pam_syslog(pamh, LOG_ERR, "zfs_unmount failed with: %d", ret);
zfs_close(ds);
return (-1);
}
ret = lzc_unload_key(ds_name);
if (ret) {
pam_syslog(pamh, LOG_ERR, "unload_key failed with: %d", ret);
zfs_close(ds);
return (-1);
}
zfs_close(ds);
return (0);
}
typedef struct {
char *homes_prefix;
char *runstatedir;
char *homedir;
char *dsname;
uid_t uid;
const char *username;
int unmount_and_unload;
} zfs_key_config_t;
static int
zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
int argc, const char **argv)
{
config->homes_prefix = strdup("rpool/home");
if (config->homes_prefix == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
return (-1);
}
config->runstatedir = strdup(RUNSTATEDIR "/pam_zfs_key");
if (config->runstatedir == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
free(config->homes_prefix);
return (-1);
}
const char *name;
if (pam_get_user(pamh, &name, NULL) != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR,
"couldn't get username from PAM stack");
free(config->runstatedir);
free(config->homes_prefix);
return (-1);
}
struct passwd *entry = getpwnam(name);
if (!entry) {
free(config->runstatedir);
free(config->homes_prefix);
return (-1);
}
config->uid = entry->pw_uid;
config->username = name;
config->unmount_and_unload = 1;
config->dsname = NULL;
config->homedir = NULL;
for (int c = 0; c < argc; c++) {
if (strncmp(argv[c], "homes=", 6) == 0) {
free(config->homes_prefix);
config->homes_prefix = strdup(argv[c] + 6);
} else if (strncmp(argv[c], "runstatedir=", 12) == 0) {
free(config->runstatedir);
config->runstatedir = strdup(argv[c] + 12);
} else if (strcmp(argv[c], "nounmount") == 0) {
config->unmount_and_unload = 0;
} else if (strcmp(argv[c], "prop_mountpoint") == 0) {
config->homedir = strdup(entry->pw_dir);
}
}
return (0);
}
static void
zfs_key_config_free(zfs_key_config_t *config)
{
free(config->homes_prefix);
free(config->runstatedir);
free(config->homedir);
free(config->dsname);
}
static int
find_dsname_by_prop_value(zfs_handle_t *zhp, void *data)
{
zfs_type_t type = zfs_get_type(zhp);
zfs_key_config_t *target = data;
char mountpoint[ZFS_MAXPROPLEN];
/* Skip any datasets whose type does not match */
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
/* Skip any datasets whose mountpoint does not match */
(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE);
if (strcmp(target->homedir, mountpoint) != 0) {
zfs_close(zhp);
return (0);
}
target->dsname = strdup(zfs_get_name(zhp));
zfs_close(zhp);
return (1);
}
static char *
zfs_key_config_get_dataset(zfs_key_config_t *config)
{
if (config->homedir != NULL &&
config->homes_prefix != NULL) {
zfs_handle_t *zhp = zfs_open(g_zfs, config->homes_prefix,
ZFS_TYPE_FILESYSTEM);
if (zhp == NULL) {
pam_syslog(NULL, LOG_ERR, "dataset %s not found",
config->homes_prefix);
- zfs_close(zhp);
return (NULL);
}
(void) zfs_iter_filesystems(zhp, find_dsname_by_prop_value,
config);
zfs_close(zhp);
char *dsname = config->dsname;
config->dsname = NULL;
return (dsname);
}
+ if (config->homes_prefix == NULL) {
+ return (NULL);
+ }
+
size_t len = ZFS_MAX_DATASET_NAME_LEN;
size_t total_len = strlen(config->homes_prefix) + 1
+ strlen(config->username);
if (total_len > len) {
return (NULL);
}
char *ret = malloc(len + 1);
if (!ret) {
return (NULL);
}
ret[0] = 0;
strcat(ret, config->homes_prefix);
strcat(ret, "/");
strcat(ret, config->username);
return (ret);
}
static int
zfs_key_config_modify_session_counter(pam_handle_t *pamh,
zfs_key_config_t *config, int delta)
{
const char *runtime_path = config->runstatedir;
if (mkdir(runtime_path, S_IRWXU) != 0 && errno != EEXIST) {
pam_syslog(pamh, LOG_ERR, "Can't create runtime path: %d",
errno);
return (-1);
}
if (chown(runtime_path, 0, 0) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't chown runtime path: %d",
errno);
return (-1);
}
if (chmod(runtime_path, S_IRWXU) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't chmod runtime path: %d",
errno);
return (-1);
}
size_t runtime_path_len = strlen(runtime_path);
size_t counter_path_len = runtime_path_len + 1 + 10;
char *counter_path = malloc(counter_path_len + 1);
if (!counter_path) {
return (-1);
}
counter_path[0] = 0;
strcat(counter_path, runtime_path);
snprintf(counter_path + runtime_path_len, counter_path_len, "/%d",
config->uid);
const int fd = open(counter_path,
O_RDWR | O_CLOEXEC | O_CREAT | O_NOFOLLOW,
S_IRUSR | S_IWUSR);
free(counter_path);
if (fd < 0) {
pam_syslog(pamh, LOG_ERR, "Can't open counter file: %d", errno);
return (-1);
}
if (flock(fd, LOCK_EX) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't lock counter file: %d", errno);
close(fd);
return (-1);
}
char counter[20];
char *pos = counter;
int remaining = sizeof (counter) - 1;
int ret;
counter[sizeof (counter) - 1] = 0;
while (remaining > 0 && (ret = read(fd, pos, remaining)) > 0) {
remaining -= ret;
pos += ret;
}
*pos = 0;
long int counter_value = strtol(counter, NULL, 10);
counter_value += delta;
if (counter_value < 0) {
counter_value = 0;
}
lseek(fd, 0, SEEK_SET);
if (ftruncate(fd, 0) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't truncate counter file: %d",
errno);
close(fd);
return (-1);
}
snprintf(counter, sizeof (counter), "%ld", counter_value);
remaining = strlen(counter);
pos = counter;
while (remaining > 0 && (ret = write(fd, pos, remaining)) > 0) {
remaining -= ret;
pos += ret;
}
close(fd);
return (counter_value);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_authenticate(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags, (void) argc, (void) argv;
if (pw_fetch_lazy(pamh) == NULL) {
return (PAM_AUTH_ERR);
}
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_setcred(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) pamh, (void) flags, (void) argc, (void) argv;
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_chauthtok(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_PERM_DENIED);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) == -1) {
return (PAM_SERVICE_ERR);
}
if (config.uid < 1000) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
{
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
int key_loaded = is_key_loaded(pamh, dataset);
if (key_loaded == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
free(dataset);
pam_zfs_free();
if (! key_loaded) {
pam_syslog(pamh, LOG_ERR,
"key not loaded, returning try_again");
zfs_key_config_free(&config);
return (PAM_PERM_DENIED);
}
}
if ((flags & PAM_UPDATE_AUTHTOK) != 0) {
const pw_password_t *token = pw_get(pamh);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (change_key(pamh, dataset, token->value) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
if (pw_clear(pamh) == -1) {
return (PAM_SERVICE_ERR);
}
} else {
zfs_key_config_free(&config);
}
return (PAM_SUCCESS);
}
PAM_EXTERN int
pam_sm_open_session(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SUCCESS);
}
zfs_key_config_t config;
zfs_key_config_load(pamh, &config, argc, argv);
if (config.uid < 1000) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
int counter = zfs_key_config_modify_session_counter(pamh, &config, 1);
if (counter != 1) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
const pw_password_t *token = pw_get(pamh);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (decrypt_mount(pamh, dataset, token->value) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
if (pw_clear(pamh) == -1) {
return (PAM_SERVICE_ERR);
}
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_close_session(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SUCCESS);
}
zfs_key_config_t config;
zfs_key_config_load(pamh, &config, argc, argv);
if (config.uid < 1000) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
int counter = zfs_key_config_modify_session_counter(pamh, &config, -1);
if (counter != 0) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
if (config.unmount_and_unload) {
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
if (unmount_unload(pamh, dataset) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
free(dataset);
pam_zfs_free();
}
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
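For context only (not part of the patch): the pam_sm_* entry points above are never called directly; they are driven through libpam by whatever service stacks pam_zfs_key. The hypothetical application-side sketch below shows the calls that end up invoking them. The service name "login" and user "alice" are placeholders, and the conversation callback is a stub where a real application would prompt for the passphrase.

/* Illustrative only: application-side libpam calls that eventually invoke
 * the module's pam_sm_authenticate/pam_sm_open_session/pam_sm_close_session. */
#include <security/pam_appl.h>
#include <stddef.h>

static int
conv_stub(int num_msg, const struct pam_message **msg,
    struct pam_response **resp, void *appdata_ptr)
{
	(void) num_msg, (void) msg, (void) resp, (void) appdata_ptr;
	return (PAM_CONV_ERR);	/* a real application would answer prompts */
}

int
main(void)
{
	const struct pam_conv conv = { conv_stub, NULL };
	pam_handle_t *pamh = NULL;

	if (pam_start("login", "alice", &conv, &pamh) != PAM_SUCCESS)
		return (1);
	(void) pam_authenticate(pamh, 0);	/* -> pam_sm_authenticate() */
	(void) pam_open_session(pamh, 0);	/* -> pam_sm_open_session() */
	(void) pam_close_session(pamh, 0);	/* -> pam_sm_close_session() */
	return (pam_end(pamh, PAM_SUCCESS));
}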
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in b/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in
index 263055e5281f..1a6342a06fec 100644
--- a/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in
@@ -1,20 +1,20 @@
[Unit]
Description=ZFS file system shares
Documentation=man:zfs(8)
-After=nfs-server.service nfs-kernel-server.service
+Before=nfs-server.service nfs-kernel-server.service
After=smb.service
Before=rpc-statd-notify.service
Wants=zfs-mount.service
After=zfs-mount.service
PartOf=nfs-server.service nfs-kernel-server.service
PartOf=smb.service
ConditionPathIsDirectory=/sys/module/zfs
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=-@initconfdir@/zfs
ExecStart=@sbindir@/zfs share -a
[Install]
WantedBy=zfs.target
diff --git a/sys/contrib/openzfs/include/Makefile.am b/sys/contrib/openzfs/include/Makefile.am
index 1a7f67e9c440..19726bba1864 100644
--- a/sys/contrib/openzfs/include/Makefile.am
+++ b/sys/contrib/openzfs/include/Makefile.am
@@ -1,201 +1,200 @@
if BUILD_LINUX
include $(srcdir)/%D%/os/linux/Makefile.am
endif
if BUILD_FREEBSD
include $(srcdir)/%D%/os/freebsd/Makefile.am
endif
COMMON_H = \
cityhash.h \
zfeature_common.h \
zfs_comutil.h \
zfs_deleg.h \
zfs_fletcher.h \
zfs_namecheck.h \
zfs_prop.h \
\
sys/abd.h \
sys/abd_impl.h \
sys/aggsum.h \
sys/arc.h \
sys/arc_impl.h \
sys/avl.h \
sys/avl_impl.h \
sys/bitops.h \
sys/blake3.h \
sys/blkptr.h \
sys/bplist.h \
sys/bpobj.h \
sys/bptree.h \
sys/bqueue.h \
sys/btree.h \
sys/dataset_kstats.h \
sys/dbuf.h \
sys/ddt.h \
sys/dmu.h \
sys/dmu_impl.h \
sys/dmu_objset.h \
sys/dmu_recv.h \
sys/dmu_redact.h \
sys/dmu_send.h \
sys/dmu_traverse.h \
sys/dmu_tx.h \
sys/dmu_zfetch.h \
sys/dnode.h \
sys/dsl_bookmark.h \
sys/dsl_crypt.h \
sys/dsl_dataset.h \
sys/dsl_deadlist.h \
sys/dsl_deleg.h \
sys/dsl_destroy.h \
sys/dsl_dir.h \
sys/dsl_pool.h \
sys/dsl_prop.h \
sys/dsl_scan.h \
sys/dsl_synctask.h \
sys/dsl_userhold.h \
sys/edonr.h \
sys/efi_partition.h \
sys/frame.h \
sys/hkdf.h \
sys/metaslab.h \
sys/metaslab_impl.h \
sys/mmp.h \
sys/mntent.h \
sys/mod.h \
sys/multilist.h \
sys/nvpair.h \
sys/nvpair_impl.h \
sys/objlist.h \
sys/pathname.h \
sys/qat.h \
sys/range_tree.h \
sys/rrwlock.h \
sys/sa.h \
sys/sa_impl.h \
sys/skein.h \
sys/spa.h \
- sys/spa_boot.h \
sys/spa_checkpoint.h \
sys/spa_checksum.h \
sys/spa_impl.h \
sys/spa_log_spacemap.h \
sys/space_map.h \
sys/space_reftree.h \
sys/sysevent.h \
sys/txg.h \
sys/txg_impl.h \
sys/u8_textprep.h \
sys/u8_textprep_data.h \
sys/uberblock.h \
sys/uberblock_impl.h \
sys/uio_impl.h \
sys/unique.h \
sys/uuid.h \
sys/vdev.h \
sys/vdev_disk.h \
sys/vdev_draid.h \
sys/vdev_file.h \
sys/vdev_impl.h \
sys/vdev_indirect_births.h \
sys/vdev_indirect_mapping.h \
sys/vdev_initialize.h \
sys/vdev_raidz.h \
sys/vdev_raidz_impl.h \
sys/vdev_rebuild.h \
sys/vdev_removal.h \
sys/vdev_trim.h \
sys/xvattr.h \
sys/zap.h \
sys/zap_impl.h \
sys/zap_leaf.h \
sys/zcp.h \
sys/zcp_global.h \
sys/zcp_iter.h \
sys/zcp_prop.h \
sys/zcp_set.h \
sys/zfeature.h \
sys/zfs_acl.h \
sys/zfs_bootenv.h \
sys/zfs_chksum.h \
sys/zfs_context.h \
sys/zfs_debug.h \
sys/zfs_delay.h \
sys/zfs_file.h \
sys/zfs_fuid.h \
sys/zfs_project.h \
sys/zfs_quota.h \
sys/zfs_racct.h \
sys/zfs_ratelimit.h \
sys/zfs_refcount.h \
sys/zfs_rlock.h \
sys/zfs_sa.h \
sys/zfs_stat.h \
sys/zfs_sysfs.h \
sys/zfs_vfsops.h \
sys/zfs_vnops.h \
sys/zfs_znode.h \
sys/zil.h \
sys/zil_impl.h \
sys/zio.h \
sys/zio_checksum.h \
sys/zio_compress.h \
sys/zio_crypt.h \
sys/zio_impl.h \
sys/zio_priority.h \
sys/zrlock.h \
sys/zthr.h \
\
sys/crypto/api.h \
sys/crypto/common.h \
sys/crypto/icp.h \
\
sys/fm/protocol.h \
sys/fm/util.h \
sys/fm/fs/zfs.h \
\
sys/fs/zfs.h \
\
sys/lua/lauxlib.h \
sys/lua/lua.h \
sys/lua/luaconf.h \
sys/lua/lualib.h \
\
sys/sysevent/dev.h \
sys/sysevent/eventdefs.h \
\
sys/zstd/zstd.h
KERNEL_H = \
sys/zfs_ioctl.h \
sys/zfs_ioctl_impl.h \
sys/zfs_onexit.h \
sys/zvol.h \
sys/zvol_impl.h
USER_H = \
libnvpair.h \
libuutil.h \
libuutil_common.h \
libuutil_impl.h \
libzfs.h \
libzfs_core.h \
libzfsbootenv.h \
libzutil.h \
thread_pool.h
if CONFIG_USER
libzfsdir = $(includedir)/libzfs
nobase_libzfs_HEADERS = $(COMMON_H) $(USER_H)
endif
kerneldir = $(prefix)/src/zfs-$(VERSION)/include
if CONFIG_KERNEL
if BUILD_LINUX
nobase_kernel_HEADERS = $(COMMON_H) $(KERNEL_H)
endif
endif
diff --git a/sys/contrib/openzfs/include/libuutil.h b/sys/contrib/openzfs/include/libuutil.h
index cb3d366c4764..906b49ea5ca9 100644
--- a/sys/contrib/openzfs/include/libuutil.h
+++ b/sys/contrib/openzfs/include/libuutil.h
@@ -1,359 +1,326 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _LIBUUTIL_H
#define _LIBUUTIL_H
#include <sys/types.h>
#include <stdarg.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Standard flags codes.
*/
#define UU_DEFAULT 0
/*
* Standard error codes.
*/
#define UU_ERROR_NONE 0 /* no error */
#define UU_ERROR_INVALID_ARGUMENT 1 /* invalid argument */
#define UU_ERROR_UNKNOWN_FLAG 2 /* passed flag invalid */
#define UU_ERROR_NO_MEMORY 3 /* out of memory */
#define UU_ERROR_CALLBACK_FAILED 4 /* callback-initiated error */
#define UU_ERROR_NOT_SUPPORTED 5 /* operation not supported */
#define UU_ERROR_EMPTY 6 /* no value provided */
#define UU_ERROR_UNDERFLOW 7 /* value is too small */
#define UU_ERROR_OVERFLOW 8 /* value is too large */
#define UU_ERROR_INVALID_CHAR 9 /* value contains unexpected char */
#define UU_ERROR_INVALID_DIGIT 10 /* value contains digit not in base */
#define UU_ERROR_SYSTEM 99 /* underlying system error */
#define UU_ERROR_UNKNOWN 100 /* error status not known */
-/*
- * Standard program exit codes.
- */
-#define UU_EXIT_OK (*(uu_exit_ok()))
-#define UU_EXIT_FATAL (*(uu_exit_fatal()))
-#define UU_EXIT_USAGE (*(uu_exit_usage()))
-
/*
* Exit status profiles.
*/
#define UU_PROFILE_DEFAULT 0
#define UU_PROFILE_LAUNCHER 1
/*
* Error reporting functions.
*/
uint32_t uu_error(void);
const char *uu_strerror(uint32_t);
-/*
- * Program notification functions.
- */
-extern void uu_alt_exit(int);
-extern const char *uu_setpname(char *);
-extern const char *uu_getpname(void);
-extern void uu_warn(const char *, ...)
- __attribute__((format(printf, 1, 2)));
-extern void uu_vwarn(const char *, va_list)
- __attribute__((format(printf, 1, 0)));
-extern __attribute__((noreturn)) void uu_die(const char *, ...)
- __attribute__((format(printf, 1, 2)));
-extern __attribute__((noreturn)) void uu_vdie(const char *, va_list)
- __attribute__((format(printf, 1, 0)));
-extern __attribute__((noreturn)) void uu_xdie(int, const char *, ...)
- __attribute__((format(printf, 2, 3)));
-extern __attribute__((noreturn)) void uu_vxdie(int, const char *, va_list)
- __attribute__((format(printf, 2, 0)));
-
-/*
- * Exit status functions (not to be used directly)
- */
-extern int *uu_exit_ok(void);
-extern int *uu_exit_fatal(void);
-extern int *uu_exit_usage(void);
-
/*
* Identifier test flags and function.
*/
#define UU_NAME_DOMAIN 0x1 /* allow SUNW, or com.sun, prefix */
#define UU_NAME_PATH 0x2 /* allow '/'-delimited paths */
int uu_check_name(const char *, uint_t);
/*
* Convenience functions.
*/
#define UU_NELEM(a) (sizeof (a) / sizeof ((a)[0]))
extern char *uu_msprintf(const char *format, ...)
__attribute__((format(printf, 1, 2)));
extern void *uu_zalloc(size_t);
extern char *uu_strdup(const char *);
extern void uu_free(void *);
extern boolean_t uu_strcaseeq(const char *a, const char *b);
extern boolean_t uu_streq(const char *a, const char *b);
extern char *uu_strndup(const char *s, size_t n);
extern boolean_t uu_strbw(const char *a, const char *b);
extern void *uu_memdup(const void *buf, size_t sz);
/*
* Comparison function type definition.
* Developers should be careful in their use of the _private argument. If you
* break interface guarantees, you get undefined behavior.
*/
typedef int uu_compare_fn_t(const void *__left, const void *__right,
void *__private);
/*
* Walk variant flags.
* A data structure need not provide support for all variants and
* combinations. Refer to the appropriate documentation.
*/
#define UU_WALK_ROBUST 0x00000001 /* walk can survive removes */
#define UU_WALK_REVERSE 0x00000002 /* reverse walk order */
#define UU_WALK_PREORDER 0x00000010 /* walk tree in pre-order */
#define UU_WALK_POSTORDER 0x00000020 /* walk tree in post-order */
/*
* Walk callback function return codes.
*/
#define UU_WALK_ERROR -1
#define UU_WALK_NEXT 0
#define UU_WALK_DONE 1
/*
* Walk callback function type definition.
*/
typedef int uu_walk_fn_t(void *_elem, void *_private);
/*
* lists: opaque structures
*/
typedef struct uu_list_pool uu_list_pool_t;
typedef struct uu_list uu_list_t;
typedef struct uu_list_node {
uintptr_t uln_opaque[2];
} uu_list_node_t;
typedef struct uu_list_walk uu_list_walk_t;
typedef uintptr_t uu_list_index_t;
/*
* lists: interface
*
* basic usage:
* typedef struct foo {
* ...
* uu_list_node_t foo_node;
* ...
* } foo_t;
*
* static int
* foo_compare(void *l_arg, void *r_arg, void *private)
* {
* foo_t *l = l_arg;
* foo_t *r = r_arg;
*
* if (... l greater than r ...)
* return (1);
* if (... l less than r ...)
* return (-1);
* return (0);
* }
*
* ...
* // at initialization time
* foo_pool = uu_list_pool_create("foo_pool",
* sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare,
* debugging? 0 : UU_LIST_POOL_DEBUG);
* ...
*/
uu_list_pool_t *uu_list_pool_create(const char *, size_t, size_t,
uu_compare_fn_t *, uint32_t);
#define UU_LIST_POOL_DEBUG 0x00000001
void uu_list_pool_destroy(uu_list_pool_t *);
/*
* usage:
*
* foo_t *a;
* a = malloc(sizeof (*a));
* uu_list_node_init(a, &a->foo_list, pool);
* ...
* uu_list_node_fini(a, &a->foo_list, pool);
* free(a);
*/
void uu_list_node_init(void *, uu_list_node_t *, uu_list_pool_t *);
void uu_list_node_fini(void *, uu_list_node_t *, uu_list_pool_t *);
uu_list_t *uu_list_create(uu_list_pool_t *, void *_parent, uint32_t);
#define UU_LIST_DEBUG 0x00000001
#define UU_LIST_SORTED 0x00000002 /* list is sorted */
void uu_list_destroy(uu_list_t *); /* list must be empty */
size_t uu_list_numnodes(uu_list_t *);
void *uu_list_first(uu_list_t *);
void *uu_list_last(uu_list_t *);
void *uu_list_next(uu_list_t *, void *);
void *uu_list_prev(uu_list_t *, void *);
int uu_list_walk(uu_list_t *, uu_walk_fn_t *, void *, uint32_t);
uu_list_walk_t *uu_list_walk_start(uu_list_t *, uint32_t);
void *uu_list_walk_next(uu_list_walk_t *);
void uu_list_walk_end(uu_list_walk_t *);
void *uu_list_find(uu_list_t *, void *, void *, uu_list_index_t *);
void uu_list_insert(uu_list_t *, void *, uu_list_index_t);
void *uu_list_nearest_next(uu_list_t *, uu_list_index_t);
void *uu_list_nearest_prev(uu_list_t *, uu_list_index_t);
void *uu_list_teardown(uu_list_t *, void **);
void uu_list_remove(uu_list_t *, void *);
/*
* lists: interfaces for non-sorted lists only
*/
int uu_list_insert_before(uu_list_t *, void *_target, void *_elem);
int uu_list_insert_after(uu_list_t *, void *_target, void *_elem);
/*
* avl trees: opaque structures
*/
typedef struct uu_avl_pool uu_avl_pool_t;
typedef struct uu_avl uu_avl_t;
typedef struct uu_avl_node {
#ifdef _LP64
uintptr_t uan_opaque[3];
#else
uintptr_t uan_opaque[4];
#endif
} uu_avl_node_t;
typedef struct uu_avl_walk uu_avl_walk_t;
typedef uintptr_t uu_avl_index_t;
/*
* avl trees: interface
*
* basic usage:
* typedef struct foo {
* ...
* uu_avl_node_t foo_node;
* ...
* } foo_t;
*
* static int
* foo_compare(void *l_arg, void *r_arg, void *private)
* {
* foo_t *l = l_arg;
* foo_t *r = r_arg;
*
* if (... l greater than r ...)
* return (1);
* if (... l less than r ...)
* return (-1);
* return (0);
* }
*
* ...
* // at initialization time
* foo_pool = uu_avl_pool_create("foo_pool",
* sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare,
* debugging? 0 : UU_AVL_POOL_DEBUG);
* ...
*/
uu_avl_pool_t *uu_avl_pool_create(const char *, size_t, size_t,
uu_compare_fn_t *, uint32_t);
#define UU_AVL_POOL_DEBUG 0x00000001
void uu_avl_pool_destroy(uu_avl_pool_t *);
/*
* usage:
*
* foo_t *a;
* a = malloc(sizeof (*a));
* uu_avl_node_init(a, &a->foo_avl, pool);
* ...
* uu_avl_node_fini(a, &a->foo_avl, pool);
* free(a);
*/
void uu_avl_node_init(void *, uu_avl_node_t *, uu_avl_pool_t *);
void uu_avl_node_fini(void *, uu_avl_node_t *, uu_avl_pool_t *);
uu_avl_t *uu_avl_create(uu_avl_pool_t *, void *_parent, uint32_t);
#define UU_AVL_DEBUG 0x00000001
void uu_avl_destroy(uu_avl_t *); /* tree must be empty */
size_t uu_avl_numnodes(uu_avl_t *);
void *uu_avl_first(uu_avl_t *);
void *uu_avl_last(uu_avl_t *);
void *uu_avl_next(uu_avl_t *, void *);
void *uu_avl_prev(uu_avl_t *, void *);
int uu_avl_walk(uu_avl_t *, uu_walk_fn_t *, void *, uint32_t);
uu_avl_walk_t *uu_avl_walk_start(uu_avl_t *, uint32_t);
void *uu_avl_walk_next(uu_avl_walk_t *);
void uu_avl_walk_end(uu_avl_walk_t *);
void *uu_avl_find(uu_avl_t *, void *, void *, uu_avl_index_t *);
void uu_avl_insert(uu_avl_t *, void *, uu_avl_index_t);
void *uu_avl_nearest_next(uu_avl_t *, uu_avl_index_t);
void *uu_avl_nearest_prev(uu_avl_t *, uu_avl_index_t);
void *uu_avl_teardown(uu_avl_t *, void **);
void uu_avl_remove(uu_avl_t *, void *);
#ifdef __cplusplus
}
#endif
#endif /* _LIBUUTIL_H */
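As a side note (not part of the header): a minimal sketch of how the uu_list interfaces declared above fit together, covering pool creation, node initialization, sorted insertion via find/insert, and a walk. The foo_t type and key values are made up for illustration, and error handling is omitted.

#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct foo {
	int foo_key;
	uu_list_node_t foo_node;
} foo_t;

static int
foo_compare(const void *l_arg, const void *r_arg, void *private)
{
	(void) private;
	const foo_t *l = l_arg, *r = r_arg;
	return ((l->foo_key > r->foo_key) - (l->foo_key < r->foo_key));
}

static int
foo_print(void *elem, void *private)
{
	(void) private;
	(void) printf("%d\n", ((foo_t *)elem)->foo_key);
	return (UU_WALK_NEXT);
}

int
main(void)
{
	uu_list_pool_t *pool = uu_list_pool_create("foo_pool", sizeof (foo_t),
	    offsetof(foo_t, foo_node), foo_compare, UU_LIST_POOL_DEBUG);
	uu_list_t *list = uu_list_create(pool, NULL, UU_LIST_SORTED);

	for (int i = 0; i < 3; i++) {
		foo_t *f = malloc(sizeof (*f));
		f->foo_key = 3 - i;
		uu_list_node_init(f, &f->foo_node, pool);
		uu_list_index_t idx;
		(void) uu_list_find(list, f, NULL, &idx);
		uu_list_insert(list, f, idx);	/* keeps the list sorted */
	}
	(void) uu_list_walk(list, foo_print, NULL, 0);	/* prints 1, 2, 3 */
	return (0);
}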
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index 96cf1e186521..4fc776122596 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -1,999 +1,1001 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright Joyent, Inc.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2016, Intel Corporation.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
#ifndef _LIBZFS_H
#define _LIBZFS_H extern __attribute__((visibility("default")))
#include <assert.h>
#include <libshare.h>
#include <libnvpair.h>
#include <sys/mnttab.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/avl.h>
#include <libzfs_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Miscellaneous ZFS constants
*/
#define ZFS_MAXPROPLEN MAXPATHLEN
#define ZPOOL_MAXPROPLEN MAXPATHLEN
/*
* libzfs errors
*/
typedef enum zfs_error {
EZFS_SUCCESS = 0, /* no error -- success */
EZFS_NOMEM = 2000, /* out of memory */
EZFS_BADPROP, /* invalid property value */
EZFS_PROPREADONLY, /* cannot set readonly property */
EZFS_PROPTYPE, /* property does not apply to dataset type */
EZFS_PROPNONINHERIT, /* property is not inheritable */
EZFS_PROPSPACE, /* bad quota or reservation */
EZFS_BADTYPE, /* dataset is not of appropriate type */
EZFS_BUSY, /* pool or dataset is busy */
EZFS_EXISTS, /* pool or dataset already exists */
EZFS_NOENT, /* no such pool or dataset */
EZFS_BADSTREAM, /* bad backup stream */
EZFS_DSREADONLY, /* dataset is readonly */
EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */
EZFS_INVALIDNAME, /* invalid dataset name */
EZFS_BADRESTORE, /* unable to restore to destination */
EZFS_BADBACKUP, /* backup failed */
EZFS_BADTARGET, /* bad attach/detach/replace target */
EZFS_NODEVICE, /* no such device in pool */
EZFS_BADDEV, /* invalid device to add */
EZFS_NOREPLICAS, /* no valid replicas */
EZFS_RESILVERING, /* resilvering (healing reconstruction) */
EZFS_BADVERSION, /* unsupported version */
EZFS_POOLUNAVAIL, /* pool is currently unavailable */
EZFS_DEVOVERFLOW, /* too many devices in one vdev */
EZFS_BADPATH, /* must be an absolute path */
EZFS_CROSSTARGET, /* rename or clone across pool or dataset */
EZFS_ZONED, /* used improperly in local zone */
EZFS_MOUNTFAILED, /* failed to mount dataset */
EZFS_UMOUNTFAILED, /* failed to unmount dataset */
EZFS_UNSHARENFSFAILED, /* failed to unshare over nfs */
EZFS_SHARENFSFAILED, /* failed to share over nfs */
EZFS_PERM, /* permission denied */
EZFS_NOSPC, /* out of space */
EZFS_FAULT, /* bad address */
EZFS_IO, /* I/O error */
EZFS_INTR, /* signal received */
EZFS_ISSPARE, /* device is a hot spare */
EZFS_INVALCONFIG, /* invalid vdev configuration */
EZFS_RECURSIVE, /* recursive dependency */
EZFS_NOHISTORY, /* no history object */
EZFS_POOLPROPS, /* couldn't retrieve pool props */
EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
EZFS_NAMETOOLONG, /* dataset name is too long */
EZFS_OPENFAILED, /* open of device failed */
EZFS_NOCAP, /* couldn't get capacity */
EZFS_LABELFAILED, /* write of label failed */
EZFS_BADWHO, /* invalid permission who */
EZFS_BADPERM, /* invalid permission */
EZFS_BADPERMSET, /* invalid permission set name */
EZFS_NODELEGATION, /* delegated administration is disabled */
EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */
EZFS_SHARESMBFAILED, /* failed to share over smb */
EZFS_BADCACHE, /* bad cache file */
EZFS_ISL2CACHE, /* device is for the level 2 ARC */
EZFS_VDEVNOTSUP, /* unsupported vdev type */
EZFS_NOTSUP, /* ops not supported on this dataset */
EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */
EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */
EZFS_REFTAG_RELE, /* snapshot release: tag not found */
EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */
EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */
EZFS_PIPEFAILED, /* pipe create failed */
EZFS_THREADCREATEFAILED, /* thread create failed */
EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */
EZFS_SCRUBBING, /* currently scrubbing */
EZFS_NO_SCRUB, /* no active scrub */
EZFS_DIFF, /* general failure of zfs diff */
EZFS_DIFFDATA, /* bad zfs diff data */
EZFS_POOLREADONLY, /* pool is in read-only mode */
EZFS_SCRUB_PAUSED, /* scrub currently paused */
EZFS_ACTIVE_POOL, /* pool is imported on a different system */
EZFS_CRYPTOFAILED, /* failed to setup encryption */
EZFS_NO_PENDING, /* cannot cancel, no operation is pending */
EZFS_CHECKPOINT_EXISTS, /* checkpoint exists */
EZFS_DISCARDING_CHECKPOINT, /* currently discarding a checkpoint */
EZFS_NO_CHECKPOINT, /* pool has no checkpoint */
EZFS_DEVRM_IN_PROGRESS, /* a device is currently being removed */
EZFS_VDEV_TOO_BIG, /* a device is too big to be used */
EZFS_IOC_NOTSUPPORTED, /* operation not supported by zfs module */
EZFS_TOOMANY, /* argument list too long */
EZFS_INITIALIZING, /* currently initializing */
EZFS_NO_INITIALIZE, /* no active initialize */
EZFS_WRONG_PARENT, /* invalid parent dataset (e.g. ZVOL) */
EZFS_TRIMMING, /* currently trimming */
EZFS_NO_TRIM, /* no active trim */
EZFS_TRIM_NOTSUP, /* device does not support trim */
EZFS_NO_RESILVER_DEFER, /* pool doesn't support resilver_defer */
EZFS_EXPORT_IN_PROGRESS, /* currently exporting the pool */
EZFS_REBUILDING, /* resilvering (sequential reconstruction) */
EZFS_VDEV_NOTSUP, /* ops not supported for this type of vdev */
EZFS_NOT_USER_NAMESPACE, /* a file is not a user namespace */
+ EZFS_CKSUM, /* insufficient replicas */
EZFS_UNKNOWN
} zfs_error_t;
/*
* The following data structures are all part
* of the zfs_allow_t data structure which is
* used for printing 'allow' permissions.
* It is a linked list of zfs_allow_t's which
* then contain avl tree's for user/group/sets/...
* and each one of the entries in those trees have
* avl tree's for the permissions they belong to and
* whether they are local, descendent, or local+descendent
* permissions. The AVL trees are used primarily for
* sorting purposes, but also so that we can quickly find
* a given user and or permission.
*/
typedef struct zfs_perm_node {
avl_node_t z_node;
char z_pname[MAXPATHLEN];
} zfs_perm_node_t;
typedef struct zfs_allow_node {
avl_node_t z_node;
char z_key[MAXPATHLEN]; /* name, such as joe */
avl_tree_t z_localdescend; /* local+descendent perms */
avl_tree_t z_local; /* local permissions */
avl_tree_t z_descend; /* descendent permissions */
} zfs_allow_node_t;
typedef struct zfs_allow {
struct zfs_allow *z_next;
char z_setpoint[MAXPATHLEN];
avl_tree_t z_sets;
avl_tree_t z_crperms;
avl_tree_t z_user;
avl_tree_t z_group;
avl_tree_t z_everyone;
} zfs_allow_t;
/*
* Basic handle types
*/
typedef struct zfs_handle zfs_handle_t;
typedef struct zpool_handle zpool_handle_t;
typedef struct libzfs_handle libzfs_handle_t;
_LIBZFS_H int zpool_wait(zpool_handle_t *, zpool_wait_activity_t);
_LIBZFS_H int zpool_wait_status(zpool_handle_t *, zpool_wait_activity_t,
boolean_t *, boolean_t *);
/*
* Library initialization
*/
_LIBZFS_H libzfs_handle_t *libzfs_init(void);
_LIBZFS_H void libzfs_fini(libzfs_handle_t *);
_LIBZFS_H libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
_LIBZFS_H libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
_LIBZFS_H void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
_LIBZFS_H void zfs_save_arguments(int argc, char **, char *, int);
_LIBZFS_H int zpool_log_history(libzfs_handle_t *, const char *);
_LIBZFS_H int libzfs_errno(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_init(int);
_LIBZFS_H const char *libzfs_error_action(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_description(libzfs_handle_t *);
_LIBZFS_H int zfs_standard_error(libzfs_handle_t *, int, const char *);
_LIBZFS_H void libzfs_mnttab_init(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_fini(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t);
_LIBZFS_H int libzfs_mnttab_find(libzfs_handle_t *, const char *,
struct mnttab *);
_LIBZFS_H void libzfs_mnttab_add(libzfs_handle_t *, const char *,
const char *, const char *);
_LIBZFS_H void libzfs_mnttab_remove(libzfs_handle_t *, const char *);
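(Aside, not part of the header: a minimal sketch of a consumer of the initialization interfaces above. "tank" is a placeholder pool name, and error handling is reduced to the essentials.)

#include <libzfs.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	if (hdl == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}
	libzfs_print_on_error(hdl, B_TRUE);

	zpool_handle_t *zhp = zpool_open_canfail(hdl, "tank");
	if (zhp != NULL) {
		(void) printf("opened pool %s\n", zpool_get_name(zhp));
		zpool_close(zhp);
	}
	libzfs_fini(hdl);
	return (0);
}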
/*
* Basic handle functions
*/
_LIBZFS_H zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
_LIBZFS_H zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
_LIBZFS_H void zpool_close(zpool_handle_t *);
_LIBZFS_H const char *zpool_get_name(zpool_handle_t *);
_LIBZFS_H int zpool_get_state(zpool_handle_t *);
_LIBZFS_H const char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
_LIBZFS_H const char *zpool_pool_state_to_name(pool_state_t);
_LIBZFS_H void zpool_free_handles(libzfs_handle_t *);
/*
* Iterate over all active pools in the system.
*/
typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
_LIBZFS_H int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
_LIBZFS_H boolean_t zpool_skip_pool(const char *);
/*
* Functions to create and destroy pools
*/
_LIBZFS_H int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
nvlist_t *, nvlist_t *);
_LIBZFS_H int zpool_destroy(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_add(zpool_handle_t *, nvlist_t *);
typedef struct splitflags {
/* do not split, but return the config that would be split off */
int dryrun : 1;
/* after splitting, import the pool */
int import : 1;
int name_flags;
} splitflags_t;
typedef struct trimflags {
/* requested vdevs are for the entire pool */
boolean_t fullpool;
/* request a secure trim, requires support from device */
boolean_t secure;
/* after starting trim, block until trim completes */
boolean_t wait;
/* trim at the requested rate in bytes/second */
uint64_t rate;
} trimflags_t;
/*
* Functions to manipulate pool and vdev state
*/
_LIBZFS_H int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
_LIBZFS_H int zpool_initialize(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_initialize_wait(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_trim(zpool_handle_t *, pool_trim_func_t, nvlist_t *,
trimflags_t *);
_LIBZFS_H int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zpool_reguid(zpool_handle_t *);
_LIBZFS_H int zpool_reopen_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_sync_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_vdev_online(zpool_handle_t *, const char *, int,
vdev_state_t *);
_LIBZFS_H int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
_LIBZFS_H int zpool_vdev_attach(zpool_handle_t *, const char *,
const char *, nvlist_t *, int, boolean_t);
_LIBZFS_H int zpool_vdev_detach(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove_cancel(zpool_handle_t *);
_LIBZFS_H int zpool_vdev_indirect_size(zpool_handle_t *, const char *,
uint64_t *);
_LIBZFS_H int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **,
nvlist_t *, splitflags_t);
_LIBZFS_H int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_clear(zpool_handle_t *, uint64_t);
_LIBZFS_H nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
boolean_t *, boolean_t *);
_LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
const char *);
_LIBZFS_H uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp,
const char *path);
_LIBZFS_H const char *zpool_get_state_str(zpool_handle_t *);
/*
* Functions to manage pool properties
*/
_LIBZFS_H int zpool_set_prop(zpool_handle_t *, const char *, const char *);
_LIBZFS_H int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
size_t proplen, zprop_source_t *, boolean_t literal);
_LIBZFS_H uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
zprop_source_t *);
_LIBZFS_H int zpool_props_refresh(zpool_handle_t *);
_LIBZFS_H const char *zpool_prop_to_name(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_values(zpool_prop_t);
/*
* Functions to manage vdev properties
*/
_LIBZFS_H int zpool_get_vdev_prop_value(nvlist_t *, vdev_prop_t, char *, char *,
size_t, zprop_source_t *, boolean_t);
_LIBZFS_H int zpool_get_vdev_prop(zpool_handle_t *, const char *, vdev_prop_t,
char *, char *, size_t, zprop_source_t *, boolean_t);
_LIBZFS_H int zpool_get_all_vdev_props(zpool_handle_t *, const char *,
nvlist_t **);
_LIBZFS_H int zpool_set_vdev_prop(zpool_handle_t *, const char *, const char *,
const char *);
_LIBZFS_H const char *vdev_prop_to_name(vdev_prop_t);
_LIBZFS_H const char *vdev_prop_values(vdev_prop_t);
_LIBZFS_H boolean_t vdev_prop_user(const char *name);
_LIBZFS_H const char *vdev_prop_column_name(vdev_prop_t);
_LIBZFS_H boolean_t vdev_prop_align_right(vdev_prop_t);
/*
* Pool health statistics.
*/
typedef enum {
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
* This must be kept in sync with the zfs_msgid_table in
* lib/libzfs/libzfs_status.c.
*/
ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */
ZPOOL_STATUS_HOSTID_ACTIVE, /* currently active on another system */
ZPOOL_STATUS_HOSTID_REQUIRED, /* multihost=on and hostid=0 */
ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */
ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
ZPOOL_STATUS_IO_FAILURE_MMP, /* failed MMP, failmode not 'panic' */
ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */
ZPOOL_STATUS_ERRATA, /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
ZPOOL_STATUS_UNSUP_FEAT_READ, /* unsupported features for read */
ZPOOL_STATUS_UNSUP_FEAT_WRITE, /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */
ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
ZPOOL_STATUS_VERSION_OLDER, /* older legacy on-disk version */
ZPOOL_STATUS_FEAT_DISABLED, /* supported features are disabled */
ZPOOL_STATUS_RESILVERING, /* device being resilvered */
ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
ZPOOL_STATUS_REMOVED_DEV, /* removed device */
ZPOOL_STATUS_REBUILDING, /* device being rebuilt */
ZPOOL_STATUS_REBUILD_SCRUB, /* recommend scrubbing the pool */
ZPOOL_STATUS_NON_NATIVE_ASHIFT, /* (e.g. 512e dev with ashift of 9) */
ZPOOL_STATUS_COMPATIBILITY_ERR, /* bad 'compatibility' property */
ZPOOL_STATUS_INCOMPATIBLE_FEAT, /* feature set outside compatibility */
/*
* Finally, the following indicates a healthy pool.
*/
ZPOOL_STATUS_OK
} zpool_status_t;
_LIBZFS_H zpool_status_t zpool_get_status(zpool_handle_t *, const char **,
zpool_errata_t *);
_LIBZFS_H zpool_status_t zpool_import_status(nvlist_t *, const char **,
zpool_errata_t *);
/*
* Statistics and configuration functions.
*/
_LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
/*
* Import and export functions
*/
_LIBZFS_H int zpool_export(zpool_handle_t *, boolean_t, const char *);
_LIBZFS_H int zpool_export_force(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
char *altroot);
_LIBZFS_H int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
nvlist_t *, int);
_LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config);
/*
* Miscellaneous pool functions
*/
struct zfs_cmd;
_LIBZFS_H const char *const zfs_history_event_names[];
typedef enum {
VDEV_NAME_PATH = 1 << 0,
VDEV_NAME_GUID = 1 << 1,
VDEV_NAME_FOLLOW_LINKS = 1 << 2,
VDEV_NAME_TYPE_ID = 1 << 3,
} vdev_name_t;
_LIBZFS_H char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
int name_flags);
_LIBZFS_H int zpool_upgrade(zpool_handle_t *, uint64_t);
_LIBZFS_H int zpool_get_history(zpool_handle_t *, nvlist_t **, uint64_t *,
boolean_t *);
_LIBZFS_H int zpool_events_next(libzfs_handle_t *, nvlist_t **, int *, unsigned,
int);
_LIBZFS_H int zpool_events_clear(libzfs_handle_t *, int *);
_LIBZFS_H int zpool_events_seek(libzfs_handle_t *, uint64_t, int);
_LIBZFS_H void zpool_obj_to_path_ds(zpool_handle_t *, uint64_t, uint64_t,
char *, size_t);
_LIBZFS_H void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t);
_LIBZFS_H int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
_LIBZFS_H void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
_LIBZFS_H int zpool_checkpoint(zpool_handle_t *);
_LIBZFS_H int zpool_discard_checkpoint(zpool_handle_t *);
_LIBZFS_H boolean_t zpool_is_draid_spare(const char *);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
*/
_LIBZFS_H zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
_LIBZFS_H zfs_handle_t *zfs_handle_dup(zfs_handle_t *);
_LIBZFS_H void zfs_close(zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_type(const zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_underlying_type(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_name(const zfs_handle_t *);
_LIBZFS_H zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_pool_name(const zfs_handle_t *);
/*
* Property management functions. Some functions are shared with the kernel,
* and are found in sys/fs/zfs.h.
*/
/*
* zfs dataset property management
*/
_LIBZFS_H const char *zfs_prop_default_string(zfs_prop_t);
_LIBZFS_H uint64_t zfs_prop_default_numeric(zfs_prop_t);
_LIBZFS_H const char *zfs_prop_column_name(zfs_prop_t);
_LIBZFS_H boolean_t zfs_prop_align_right(zfs_prop_t);
_LIBZFS_H nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t,
nvlist_t *, uint64_t, zfs_handle_t *, zpool_handle_t *, boolean_t,
const char *);
_LIBZFS_H const char *zfs_prop_to_name(zfs_prop_t);
_LIBZFS_H int zfs_prop_set(zfs_handle_t *, const char *, const char *);
_LIBZFS_H int zfs_prop_set_list(zfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
zprop_source_t *, char *, size_t, boolean_t);
_LIBZFS_H int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t,
boolean_t);
_LIBZFS_H int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
zprop_source_t *, char *, size_t);
_LIBZFS_H int zfs_prop_get_userquota_int(zfs_handle_t *zhp,
const char *propname, uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname,
char *buf, size_t len);
_LIBZFS_H uint64_t getprop_uint64(zfs_handle_t *, zfs_prop_t, char **);
_LIBZFS_H uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
_LIBZFS_H int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t);
_LIBZFS_H const char *zfs_prop_values(zfs_prop_t);
_LIBZFS_H int zfs_prop_is_string(zfs_prop_t prop);
_LIBZFS_H nvlist_t *zfs_get_all_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_user_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_recvd_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_clones_nvl(zfs_handle_t *);
_LIBZFS_H int zfs_wait_status(zfs_handle_t *, zfs_wait_activity_t,
boolean_t *, boolean_t *);
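(Editor's note: a hedged illustration of the dataset property getters above — one formatted string property and one raw numeric property. The handle is assumed to come from zfs_open(); ZFS_MAXPROPLEN and the ZFS_PROP_* identifiers come from the zfs headers.)

#include <libzfs.h>
#include <stdio.h>

static int
show_basic_props(zfs_handle_t *zhp)
{
    char mountpoint[ZFS_MAXPROPLEN];
    uint64_t used;
    zprop_source_t src;

    if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
        sizeof (mountpoint), &src, NULL, 0, B_FALSE) != 0)
        return (-1);

    /* Numeric form; no human-readable formatting is applied. */
    used = zfs_prop_get_int(zhp, ZFS_PROP_USED);

    printf("%s mounted at %s, %llu bytes used\n",
        zfs_get_name(zhp), mountpoint, (unsigned long long)used);
    return (0);
}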
/*
* zfs encryption management
*/
_LIBZFS_H int zfs_crypto_get_encryption_root(zfs_handle_t *, boolean_t *,
char *);
_LIBZFS_H int zfs_crypto_create(libzfs_handle_t *, char *, nvlist_t *,
nvlist_t *, boolean_t stdin_available, uint8_t **, uint_t *);
_LIBZFS_H int zfs_crypto_clone_check(libzfs_handle_t *, zfs_handle_t *, char *,
nvlist_t *);
_LIBZFS_H int zfs_crypto_attempt_load_keys(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_crypto_load_key(zfs_handle_t *, boolean_t, const char *);
_LIBZFS_H int zfs_crypto_unload_key(zfs_handle_t *);
_LIBZFS_H int zfs_crypto_rewrap(zfs_handle_t *, nvlist_t *, boolean_t);
typedef struct zprop_list {
int pl_prop;
char *pl_user_prop;
struct zprop_list *pl_next;
boolean_t pl_all;
size_t pl_width;
size_t pl_recvd_width;
boolean_t pl_fixed;
} zprop_list_t;
_LIBZFS_H int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t,
boolean_t);
_LIBZFS_H void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
_LIBZFS_H int vdev_expand_proplist(zpool_handle_t *, const char *,
zprop_list_t **);
#define ZFS_MOUNTPOINT_NONE "none"
#define ZFS_MOUNTPOINT_LEGACY "legacy"
#define ZFS_FEATURE_DISABLED "disabled"
#define ZFS_FEATURE_ENABLED "enabled"
#define ZFS_FEATURE_ACTIVE "active"
#define ZFS_UNSUPPORTED_INACTIVE "inactive"
#define ZFS_UNSUPPORTED_READONLY "readonly"
/*
* zpool property management
*/
_LIBZFS_H int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **,
zfs_type_t, boolean_t);
_LIBZFS_H int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
size_t);
_LIBZFS_H const char *zpool_prop_default_string(zpool_prop_t);
_LIBZFS_H uint64_t zpool_prop_default_numeric(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_column_name(zpool_prop_t);
_LIBZFS_H boolean_t zpool_prop_align_right(zpool_prop_t);
/*
* Functions shared by zfs and zpool property management.
*/
_LIBZFS_H int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
boolean_t ordered, zfs_type_t type);
_LIBZFS_H int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
zfs_type_t);
_LIBZFS_H void zprop_free_list(zprop_list_t *);
#define ZFS_GET_NCOLS 5
typedef enum {
GET_COL_NONE,
GET_COL_NAME,
GET_COL_PROPERTY,
GET_COL_VALUE,
GET_COL_RECVD,
GET_COL_SOURCE
} zfs_get_column_t;
/*
* Functions for printing zfs or zpool properties
*/
typedef struct vdev_cbdata {
int cb_name_flags;
char **cb_names;
unsigned int cb_names_count;
} vdev_cbdata_t;
typedef struct zprop_get_cbdata {
int cb_sources;
zfs_get_column_t cb_columns[ZFS_GET_NCOLS];
int cb_colwidths[ZFS_GET_NCOLS + 1];
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
zprop_list_t *cb_proplist;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
} zprop_get_cbdata_t;
_LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *);
/*
* Iterator functions.
*/
typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *);
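(Editor's note: the iterators above all share the zfs_iter_f callback shape; a non-zero return from the callback aborts the walk. A hedged sketch that recursively prints every filesystem reachable from a handle — error handling trimmed for brevity, and the callback closes the handles it receives from the iterator, as libzfs callers conventionally do.)

#include <libzfs.h>
#include <stdio.h>

static int
print_fs_cb(zfs_handle_t *zhp, void *arg)
{
    (void) arg;
    printf("%s\n", zfs_get_name(zhp));
    /* Recurse into child filesystems; returning non-zero would stop the walk. */
    (void) zfs_iter_filesystems(zhp, print_fs_cb, NULL);
    zfs_close(zhp);
    return (0);
}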
typedef struct get_all_cb {
zfs_handle_t **cb_handles;
size_t cb_alloc;
size_t cb_used;
} get_all_cb_t;
_LIBZFS_H void zfs_foreach_mountpoint(libzfs_handle_t *, zfs_handle_t **,
size_t, zfs_iter_f, void *, boolean_t);
_LIBZFS_H void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *);
/*
* Functions to create and destroy datasets.
*/
_LIBZFS_H int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
nvlist_t *);
_LIBZFS_H int zfs_create_ancestors(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_destroy(zfs_handle_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl(libzfs_handle_t *, nvlist_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl_os(libzfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t,
nvlist_t *);
_LIBZFS_H int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps,
nvlist_t *props);
_LIBZFS_H int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
typedef struct renameflags {
/* recursive rename */
int recursive : 1;
/* don't unmount file systems */
int nounmount : 1;
/* force unmount file systems */
int forceunmount : 1;
} renameflags_t;
_LIBZFS_H int zfs_rename(zfs_handle_t *, const char *, renameflags_t);
typedef struct sendflags {
/* Amount of extra information to print. */
int verbosity;
/* recursive send (ie, -R) */
boolean_t replicate;
/* for recursive send, skip sending missing snapshots */
boolean_t skipmissing;
/* for incrementals, do all intermediate snapshots */
boolean_t doall;
/* if dataset is a clone, do incremental from its origin */
boolean_t fromorigin;
/* field no longer used, maintained for backwards compatibility */
boolean_t pad;
/* send properties (ie, -p) */
boolean_t props;
/* do not send (no-op, ie. -n) */
boolean_t dryrun;
/* parsable verbose output (ie. -P) */
boolean_t parsable;
/* show progress (ie. -v) */
boolean_t progress;
/* large blocks (>128K) are permitted */
boolean_t largeblock;
/* WRITE_EMBEDDED records of type DATA are permitted */
boolean_t embed_data;
/* compressed WRITE records are permitted */
boolean_t compress;
/* raw encrypted records are permitted */
boolean_t raw;
/* only send received properties (ie. -b) */
boolean_t backup;
/* include snapshot holds in send stream */
boolean_t holds;
/* stream represents a partially received dataset */
boolean_t saved;
} sendflags_t;
typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_send(zfs_handle_t *, const char *, const char *,
sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **);
_LIBZFS_H int zfs_send_one(zfs_handle_t *, const char *, int, sendflags_t *,
const char *);
_LIBZFS_H int zfs_send_progress(zfs_handle_t *, int, uint64_t *, uint64_t *);
_LIBZFS_H int zfs_send_resume(libzfs_handle_t *, sendflags_t *, int outfd,
const char *);
_LIBZFS_H int zfs_send_saved(zfs_handle_t *, sendflags_t *, int, const char *);
_LIBZFS_H nvlist_t *zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl,
const char *token);
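(Editor's note: a hedged usage sketch for the send path — zero-initialize sendflags_t, enable the desired options, then hand the snapshot handle and an output descriptor to zfs_send_one(). The snapshot name "tank/data@snap" and the use of STDOUT_FILENO are illustrative only.)

#include <libzfs.h>
#include <unistd.h>

static int
send_snapshot(libzfs_handle_t *hdl)
{
    sendflags_t flags = { 0 };
    zfs_handle_t *zhp;
    int err;

    flags.props = B_TRUE;        /* include dataset properties (-p) */
    flags.largeblock = B_TRUE;   /* permit blocks larger than 128K */

    zhp = zfs_open(hdl, "tank/data@snap", ZFS_TYPE_SNAPSHOT);
    if (zhp == NULL)
        return (-1);

    /* "from" is NULL for a full (non-incremental) stream. */
    err = zfs_send_one(zhp, NULL, STDOUT_FILENO, &flags, NULL);
    zfs_close(zhp);
    return (err);
}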
_LIBZFS_H int zfs_promote(zfs_handle_t *);
_LIBZFS_H int zfs_hold(zfs_handle_t *, const char *, const char *,
boolean_t, int);
_LIBZFS_H int zfs_hold_nvl(zfs_handle_t *, int, nvlist_t *);
_LIBZFS_H int zfs_release(zfs_handle_t *, const char *, const char *,
boolean_t);
_LIBZFS_H int zfs_get_holds(zfs_handle_t *, nvlist_t **);
_LIBZFS_H uint64_t zvol_volsize_to_reservation(zpool_handle_t *, uint64_t,
nvlist_t *);
typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain,
uid_t rid, uint64_t space);
_LIBZFS_H int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t,
zfs_userspace_cb_t, void *);
_LIBZFS_H int zfs_get_fsacl(zfs_handle_t *, nvlist_t **);
_LIBZFS_H int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *);
typedef struct recvflags {
/* print informational messages (ie, -v was specified) */
boolean_t verbose;
/* the destination is a prefix, not the exact fs (ie, -d) */
boolean_t isprefix;
/*
* Only the tail of the sent snapshot path is appended to the
* destination to determine the received snapshot name (ie, -e).
*/
boolean_t istail;
/* do not actually do the recv, just check if it would work (ie, -n) */
boolean_t dryrun;
/* rollback/destroy filesystems as necessary (eg, -F) */
boolean_t force;
/* set "canmount=off" on all modified filesystems */
boolean_t canmountoff;
/*
* Mark the file systems as "resumable" and do not destroy them if the
* receive is interrupted
*/
boolean_t resumable;
/* byteswap flag is used internally; callers need not specify */
boolean_t byteswap;
/* do not mount file systems as they are extracted (private) */
boolean_t nomount;
/* Was holds flag set in the compound header? */
boolean_t holds;
/* skip receive of snapshot holds */
boolean_t skipholds;
/* mount the filesystem unless nomount is specified */
boolean_t domount;
/* force unmount while recv snapshot (private) */
boolean_t forceunmount;
/* use this recv to check (and heal if needed) an existing snapshot */
boolean_t heal;
} recvflags_t;
_LIBZFS_H int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
recvflags_t *, int, avl_tree_t *);
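(Editor's note: the matching receive side, again only a sketch. recvflags_t is zero-initialized and selectively enabled, STDIN_FILENO and the destination name "tank/restore" are placeholders, and zfs_receive() takes the libzfs handle rather than a dataset handle because the destination may not exist yet.)

#include <libzfs.h>
#include <unistd.h>

static int
receive_stream(libzfs_handle_t *hdl)
{
    recvflags_t flags = { 0 };

    flags.resumable = B_TRUE;   /* keep partial state if interrupted */
    flags.nomount = B_TRUE;     /* caller mounts later */

    /* props == NULL: take properties from the stream as-is. */
    return (zfs_receive(hdl, "tank/restore", NULL, &flags,
        STDIN_FILENO, NULL));
}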
typedef enum diff_flags {
ZFS_DIFF_PARSEABLE = 1 << 0,
ZFS_DIFF_TIMESTAMP = 1 << 1,
ZFS_DIFF_CLASSIFY = 1 << 2,
ZFS_DIFF_NO_MANGLE = 1 << 3
} diff_flags_t;
_LIBZFS_H int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *,
int);
/*
* Miscellaneous functions.
*/
_LIBZFS_H const char *zfs_type_to_name(zfs_type_t);
_LIBZFS_H void zfs_refresh_properties(zfs_handle_t *);
_LIBZFS_H int zfs_name_valid(const char *, zfs_type_t);
_LIBZFS_H zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_parent_name(zfs_handle_t *, char *, size_t);
_LIBZFS_H boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_spa_version(zfs_handle_t *, int *);
_LIBZFS_H boolean_t zfs_bookmark_exists(const char *path);
/*
* Mount support functions.
*/
_LIBZFS_H boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
_LIBZFS_H boolean_t zfs_is_mounted(zfs_handle_t *, char **);
_LIBZFS_H int zfs_mount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_mount_at(zfs_handle_t *, const char *, int, const char *);
_LIBZFS_H int zfs_unmount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_unmountall(zfs_handle_t *, int);
_LIBZFS_H int zfs_mount_delegation_check(void);
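(Editor's note: a short sketch of the mount helpers, assuming an open filesystem handle. zfs_mount() honors the dataset's own mountpoint property, and zfs_is_mounted() optionally returns an allocated string naming the current mountpoint, which the caller frees.)

#include <libzfs.h>
#include <stdio.h>
#include <stdlib.h>

static int
ensure_mounted(zfs_handle_t *zhp)
{
    char *where = NULL;

    if (zfs_is_mounted(zhp, &where)) {
        printf("%s already mounted at %s\n", zfs_get_name(zhp), where);
        free(where);
        return (0);
    }
    /* NULL options and no flags: use the dataset's own settings. */
    return (zfs_mount(zhp, NULL, 0));
}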
#if defined(__linux__) || defined(__APPLE__)
_LIBZFS_H int zfs_parse_mount_options(const char *mntopts,
unsigned long *mntflags, unsigned long *zfsflags, int sloppy, char *badopt,
char *mtabopt);
_LIBZFS_H void zfs_adjust_mount_options(zfs_handle_t *zhp, const char *mntpoint,
char *mntopts, char *mtabopt);
#endif
/*
* Share support functions.
*
* enum sa_protocol * lists are terminated with SA_NO_PROTOCOL;
* NULL means "all/any known to this libzfs".
*/
#define SA_NO_PROTOCOL -1
_LIBZFS_H boolean_t zfs_is_shared(zfs_handle_t *zhp, char **where,
const enum sa_protocol *proto);
_LIBZFS_H int zfs_share(zfs_handle_t *zhp, const enum sa_protocol *proto);
_LIBZFS_H int zfs_unshare(zfs_handle_t *zhp, const char *mountpoint,
const enum sa_protocol *proto);
_LIBZFS_H int zfs_unshareall(zfs_handle_t *zhp,
const enum sa_protocol *proto);
_LIBZFS_H void zfs_commit_shares(const enum sa_protocol *proto);
+_LIBZFS_H void zfs_truncate_shares(const enum sa_protocol *proto);
_LIBZFS_H int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
/*
* Utility functions to run an external process.
*/
#define STDOUT_VERBOSE 0x01
#define STDERR_VERBOSE 0x02
#define NO_DEFAULT_PATH 0x04 /* Don't use $PATH to look up the command */
_LIBZFS_H int libzfs_run_process(const char *, char **, int);
_LIBZFS_H int libzfs_run_process_get_stdout(const char *, char *[], char *[],
char **[], int *);
_LIBZFS_H int libzfs_run_process_get_stdout_nopath(const char *, char *[],
char *[], char **[], int *);
_LIBZFS_H void libzfs_free_str_array(char **, int);
_LIBZFS_H boolean_t libzfs_envvar_is_set(const char *);
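(Editor's note: the process helpers capture a child's standard output as an array of lines that the caller later releases with libzfs_free_str_array(). A hedged example running /bin/echo — the path and arguments are arbitrary, and passing NULL for the environment is assumed to fall back to the parent environment.)

#include <libzfs.h>
#include <stdio.h>

static void
run_echo_example(void)
{
    char *argv[] = { "echo", "hello", NULL };
    char **lines = NULL;
    int nlines = 0;

    if (libzfs_run_process_get_stdout("/bin/echo", argv, NULL,
        &lines, &nlines) == 0) {
        for (int i = 0; i < nlines; i++)
            printf("line %d: %s\n", i, lines[i]);
        libzfs_free_str_array(lines, nlines);
    }
}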
/*
* Utility functions for zfs version
*/
_LIBZFS_H const char *zfs_version_userland(void);
_LIBZFS_H char *zfs_version_kernel(void);
_LIBZFS_H int zfs_version_print(void);
/*
* Given a device or file, determine if it is part of a pool.
*/
_LIBZFS_H int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
boolean_t *);
/*
* Label manipulation.
*/
_LIBZFS_H int zpool_clear_label(int);
_LIBZFS_H int zpool_set_bootenv(zpool_handle_t *, const nvlist_t *);
_LIBZFS_H int zpool_get_bootenv(zpool_handle_t *, nvlist_t **);
/*
* Management interfaces for SMB ACL files
*/
_LIBZFS_H int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *);
_LIBZFS_H int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *,
char *);
/*
* Enable and disable datasets within a pool by mounting/unmounting and
* sharing/unsharing them.
*/
_LIBZFS_H int zpool_enable_datasets(zpool_handle_t *, const char *, int);
_LIBZFS_H int zpool_disable_datasets(zpool_handle_t *, boolean_t);
_LIBZFS_H void zpool_disable_datasets_os(zpool_handle_t *, boolean_t);
_LIBZFS_H void zpool_disable_volume_os(const char *);
/*
* Parse a features file for -o compatibility
*/
typedef enum {
ZPOOL_COMPATIBILITY_OK,
ZPOOL_COMPATIBILITY_WARNTOKEN,
ZPOOL_COMPATIBILITY_BADTOKEN,
ZPOOL_COMPATIBILITY_BADFILE,
ZPOOL_COMPATIBILITY_NOFILES
} zpool_compat_status_t;
_LIBZFS_H zpool_compat_status_t zpool_load_compat(const char *,
boolean_t *, char *, size_t);
#ifdef __FreeBSD__
/*
* Attach/detach the given filesystem to/from the given jail.
*/
_LIBZFS_H int zfs_jail(zfs_handle_t *zhp, int jailid, int attach);
/*
* Set loader options for next boot.
*/
_LIBZFS_H int zpool_nextboot(libzfs_handle_t *, uint64_t, uint64_t,
const char *);
#endif /* __FreeBSD__ */
#ifdef __linux__
/*
* Add or delete the given filesystem to/from the given user namespace.
*/
_LIBZFS_H int zfs_userns(zfs_handle_t *zhp, const char *nspath, int attach);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/Makefile.am b/sys/contrib/openzfs/include/os/freebsd/Makefile.am
index 5ddb7cd710b8..a750f52e7d25 100644
--- a/sys/contrib/openzfs/include/os/freebsd/Makefile.am
+++ b/sys/contrib/openzfs/include/os/freebsd/Makefile.am
@@ -1,90 +1,90 @@
noinst_HEADERS = \
%D%/linux/compiler.h \
%D%/linux/types.h \
\
%D%/spl/acl/acl_common.h \
\
%D%/spl/rpc/xdr.h \
\
%D%/spl/sys/acl.h \
%D%/spl/sys/acl_impl.h \
%D%/spl/sys/atomic.h \
%D%/spl/sys/byteorder.h \
%D%/spl/sys/callb.h \
%D%/spl/sys/ccompat.h \
%D%/spl/sys/ccompile.h \
%D%/spl/sys/cmn_err.h \
%D%/spl/sys/condvar.h \
%D%/spl/sys/cred.h \
%D%/spl/sys/ctype.h \
%D%/spl/sys/debug.h \
%D%/spl/sys/dirent.h \
%D%/spl/sys/disp.h \
%D%/spl/sys/dkio.h \
- %D%/spl/sys/extdirent.h \
%D%/spl/sys/fcntl.h \
%D%/spl/sys/file.h \
%D%/spl/sys/freebsd_rwlock.h \
%D%/spl/sys/idmap.h \
%D%/spl/sys/inttypes.h \
%D%/spl/sys/isa_defs.h \
%D%/spl/sys/kmem.h \
%D%/spl/sys/kmem_cache.h \
%D%/spl/sys/kstat.h \
%D%/spl/sys/list.h \
%D%/spl/sys/list_impl.h \
%D%/spl/sys/lock.h \
%D%/spl/sys/misc.h \
%D%/spl/sys/mod_os.h \
%D%/spl/sys/mode.h \
%D%/spl/sys/mount.h \
%D%/spl/sys/mutex.h \
%D%/spl/sys/param.h \
%D%/spl/sys/policy.h \
%D%/spl/sys/proc.h \
%D%/spl/sys/processor.h \
%D%/spl/sys/procfs_list.h \
%D%/spl/sys/random.h \
%D%/spl/sys/rwlock.h \
%D%/spl/sys/sdt.h \
%D%/spl/sys/sid.h \
%D%/spl/sys/sig.h \
%D%/spl/sys/simd.h \
+ %D%/spl/sys/simd_powerpc.h \
%D%/spl/sys/simd_x86.h \
%D%/spl/sys/spl_condvar.h \
%D%/spl/sys/string.h \
%D%/spl/sys/sunddi.h \
%D%/spl/sys/sysmacros.h \
%D%/spl/sys/systeminfo.h \
%D%/spl/sys/systm.h \
%D%/spl/sys/taskq.h \
%D%/spl/sys/thread.h \
%D%/spl/sys/time.h \
%D%/spl/sys/timer.h \
%D%/spl/sys/trace.h \
%D%/spl/sys/trace_zfs.h \
%D%/spl/sys/types.h \
%D%/spl/sys/types32.h \
%D%/spl/sys/uio.h \
%D%/spl/sys/uuid.h \
%D%/spl/sys/vfs.h \
%D%/spl/sys/vm.h \
%D%/spl/sys/vmsystm.h \
%D%/spl/sys/vnode.h \
%D%/spl/sys/vnode_impl.h \
%D%/spl/sys/wmsum.h \
%D%/spl/sys/zmod.h \
%D%/spl/sys/zone.h \
\
%D%/zfs/sys/freebsd_crypto.h \
%D%/zfs/sys/sha2.h \
%D%/zfs/sys/vdev_os.h \
%D%/zfs/sys/zfs_bootenv_os.h \
%D%/zfs/sys/zfs_context_os.h \
%D%/zfs/sys/zfs_ctldir.h \
%D%/zfs/sys/zfs_dir.h \
%D%/zfs/sys/zfs_ioctl_compat.h \
%D%/zfs/sys/zfs_vfsops_os.h \
%D%/zfs/sys/zfs_vnops_os.h \
%D%/zfs/sys/zfs_znode_impl.h \
%D%/zfs/sys/zpl.h
diff --git a/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h b/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h
index 3a66da195891..b408b77c746d 100644
--- a/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h
+++ b/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h
@@ -1,102 +1,101 @@
/*
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iXsystems, Inc.
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* Copyright (c) 2015 François Tigeot
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _LINUX_COMPILER_H_
#define _LINUX_COMPILER_H_
#include <sys/cdefs.h>
#define __user
#define __kernel
#define __safe
#define __force
#define __nocast
#define __iomem
#define __chk_user_ptr(x) ((void)0)
#define __chk_io_ptr(x) ((void)0)
#define __builtin_warning(x, y...) (1)
#define __acquires(x)
#define __releases(x)
#define __acquire(x) do { } while (0)
#define __release(x) do { } while (0)
#define __cond_lock(x, c) (c)
#define __bitwise
#define __devinitdata
#define __deprecated
#define __init
#define __initconst
#define __devinit
#define __devexit
#define __exit
#define __rcu
#define __percpu
#define __weak __weak_symbol
#define __malloc
#define ___stringify(...) #__VA_ARGS__
#define __stringify(...) ___stringify(__VA_ARGS__)
#define __attribute_const__ __attribute__((__const__))
#undef __always_inline
#define __always_inline inline
#define noinline __noinline
#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
#define zfs_fallthrough __attribute__((__fallthrough__))
#if !defined(_KERNEL) && !defined(_STANDALONE)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#define typeof(x) __typeof(x)
#define uninitialized_var(x) x = x
#define __maybe_unused __unused
#define __always_unused __unused
#define __must_check __result_use_check
#define __printf(a, b) __printflike(a, b)
#define barrier() __asm__ __volatile__("": : :"memory")
-#define smp_rmb() rmb()
#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)
#define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x))
#define WRITE_ONCE(x, v) do { \
barrier(); \
ACCESS_ONCE(x) = (v); \
barrier(); \
} while (0)
#define lockless_dereference(p) READ_ONCE(p)
#define _AT(T, X) ((T)(X))
#endif /* _LINUX_COMPILER_H_ */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/atomic.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/atomic.h
index b15953e5754f..ccbf6d3293c4 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/atomic.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/atomic.h
@@ -1,195 +1,197 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_ATOMIC_H_
#define _OPENSOLARIS_SYS_ATOMIC_H_
#ifndef _STANDALONE
#include <sys/types.h>
#include <machine/atomic.h>
#define atomic_sub_64 atomic_subtract_64
#if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
#define I386_HAVE_ATOMIC64
#endif
#if defined(__i386__) || defined(__amd64__) || defined(__arm__)
/* No spurious failures from fcmpset. */
#define STRONG_FCMPSET
#endif
#if !defined(__LP64__) && !defined(__mips_n32) && \
!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
!defined(HAS_EMULATED_ATOMIC64)
extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
extern void atomic_dec_64(volatile uint64_t *target);
extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
extern uint64_t atomic_load_64(volatile uint64_t *a);
extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
uint64_t newval);
#endif
-#define membar_producer atomic_thread_fence_rel
+#define membar_consumer() atomic_thread_fence_acq()
+#define membar_producer() atomic_thread_fence_rel()
+#define membar_sync() atomic_thread_fence_seq_cst()
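(Editor's note: the membar_* wrappers above map the illumos-style barriers onto FreeBSD's atomic_thread_fence_*() primitives. A minimal publish/consume sketch, with hypothetical variables, showing the intended producer/consumer pairing.)

static uint64_t payload;
static volatile uint32_t ready;

/* Producer: make the payload visible before raising the flag. */
static void
publish(uint64_t v)
{
    payload = v;
    membar_producer();   /* order the store to payload before ready */
    ready = 1;
}

/* Consumer: observe the flag before reading the payload. */
static int
consume(uint64_t *out)
{
    if (ready == 0)
        return (0);
    membar_consumer();   /* order the load of ready before payload */
    *out = payload;
    return (1);
}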
static __inline uint32_t
atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
{
return (atomic_fetchadd_32(target, delta) + delta);
}
static __inline uint_t
atomic_add_int_nv(volatile uint_t *target, int delta)
{
return (atomic_add_32_nv(target, delta));
}
static __inline void
atomic_inc_32(volatile uint32_t *target)
{
atomic_add_32(target, 1);
}
static __inline uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
return (atomic_add_32_nv(target, 1));
}
static __inline void
atomic_dec_32(volatile uint32_t *target)
{
atomic_subtract_32(target, 1);
}
static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
return (atomic_add_32_nv(target, -1));
}
#ifndef __sparc64__
static inline uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
#ifdef STRONG_FCMPSET
(void) atomic_fcmpset_32(target, &cmp, newval);
#else
uint32_t expected = cmp;
do {
if (atomic_fcmpset_32(target, &cmp, newval))
break;
} while (cmp == expected);
#endif
return (cmp);
}
#endif
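(Editor's note: atomic_cas_32() above returns the value found at the target, so callers typically wrap it in a retry loop. A hedged sketch of the usual compare-and-swap idiom — a hypothetical saturating counter.)

static uint32_t
counter_inc_capped(volatile uint32_t *cnt, uint32_t cap)
{
    uint32_t oldv, newv;

    do {
        oldv = *cnt;
        if (oldv >= cap)
            return (oldv);      /* already saturated */
        newv = oldv + 1;
        /* Returns the prior value; retry if another thread raced us. */
    } while (atomic_cas_32(cnt, oldv, newv) != oldv);

    return (newv);
}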
#if defined(__LP64__) || defined(__mips_n32) || \
defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
defined(HAS_EMULATED_ATOMIC64)
static __inline void
atomic_dec_64(volatile uint64_t *target)
{
atomic_subtract_64(target, 1);
}
static inline uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
return (atomic_fetchadd_64(target, delta) + delta);
}
#ifndef __sparc64__
static inline uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
#ifdef STRONG_FCMPSET
(void) atomic_fcmpset_64(target, &cmp, newval);
#else
uint64_t expected = cmp;
do {
if (atomic_fcmpset_64(target, &cmp, newval))
break;
} while (cmp == expected);
#endif
return (cmp);
}
#endif
#endif
static __inline void
atomic_inc_64(volatile uint64_t *target)
{
atomic_add_64(target, 1);
}
static __inline uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
return (atomic_add_64_nv(target, 1));
}
static __inline uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
return (atomic_add_64_nv(target, -1));
}
#if !defined(COMPAT_32BIT) && defined(__LP64__)
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
return ((void *)atomic_cas_64((volatile uint64_t *)target,
(uint64_t)cmp, (uint64_t)newval));
}
#else
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
return ((void *)atomic_cas_32((volatile uint32_t *)target,
(uint32_t)cmp, (uint32_t)newval));
}
#endif /* !defined(COMPAT_32BIT) && defined(__LP64__) */
#else /* _STANDALONE */
/*
* sometimes atomic_add_64 is defined, sometimes not, but the
* following is always right for the boot loader.
*/
#undef atomic_add_64
#define atomic_add_64(ptr, val) *(ptr) += val
#undef atomic_sub_64
#define atomic_sub_64(ptr, val) *(ptr) -= val
#endif /* !_STANDALONE */
#endif /* !_OPENSOLARIS_SYS_ATOMIC_H_ */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
index 2be1b76e4334..d46a7d2c0143 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
@@ -1,36 +1,38 @@
/*
* Copyright (c) 2013 Andriy Gapon
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_DISP_H_
#define _OPENSOLARIS_SYS_DISP_H_
#include <sys/proc.h>
+#define KPREEMPT_SYNC (-1)
+
#define kpreempt(x) kern_yield(PRI_USER)
#endif /* _OPENSOLARIS_SYS_DISP_H_ */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/extdirent.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/extdirent.h
deleted file mode 100644
index d6927ae40bbb..000000000000
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/extdirent.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or https://opensource.org/licenses/CDDL-1.0.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_EXTDIRENT_H
-#define _SYS_EXTDIRENT_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/types.h>
-#include <sys/dirent.h>
-
-/*
- * Extended file-system independent directory entry. This style of
- * dirent provides additional informational flag bits for each
- * directory entry. This dirent will be returned instead of the
- * standard dirent if a VOP_READDIR() requests dirent flags via
- * V_RDDIR_ENTFLAGS, and if the file system supports the flags.
- */
-typedef struct edirent {
- ino64_t ed_ino; /* "inode number" of entry */
- off64_t ed_off; /* offset of disk directory entry */
- uint32_t ed_eflags; /* per-entry flags */
- unsigned short ed_reclen; /* length of this record */
- char ed_name[1]; /* name of file */
-} edirent_t;
-
-#define EDIRENT_RECLEN(namelen) \
- ((offsetof(edirent_t, ed_name[0]) + 1 + (namelen) + 7) & ~ 7)
-#define EDIRENT_NAMELEN(reclen) \
- ((reclen) - (offsetof(edirent_t, ed_name[0])))
-
-/*
- * Extended entry flags
- * Extended entries include a bitfield of extra information
- * regarding that entry.
- */
-#define ED_CASE_CONFLICT 0x10 /* Disconsidering case, entry is not unique */
-
-/*
- * Extended flags accessor function
- */
-#define ED_CASE_CONFLICTS(x) ((x)->ed_eflags & ED_CASE_CONFLICT)
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_EXTDIRENT_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
index 3a9ebbfc3bc4..e2815ce9e543 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
@@ -1,120 +1,125 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SPL_MOD_H
#define _SPL_MOD_H
#include <sys/sysctl.h>
-#define EXPORT_SYMBOL(x)
-#define module_param(a, b, c)
-#define MODULE_PARM_DESC(a, b)
-
#define ZMOD_RW CTLFLAG_RWTUN
#define ZMOD_RD CTLFLAG_RDTUN
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc) \
SYSCTL_DECL(_vfs_ ## scope_prefix); \
SYSCTL_##type(_vfs_ ## scope_prefix, OID_AUTO, name, perm, \
&name_prefix ## name, 0, desc)
#define ZFS_MODULE_PARAM_ARGS SYSCTL_HANDLER_ARGS
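(Editor's note: ZFS_MODULE_PARAM() is how OpenZFS tunables surface as FreeBSD sysctls under vfs.*: the scope prefix selects the sysctl subtree and the name prefix is stripped from the C variable name. A hypothetical tunable as an illustration — the variable and description are made up and not part of this change.)

/* Hypothetical tunable; expands to a SYSCTL_UINT node vfs.zfs.example_window. */
static uint_t zfs_example_window = 8;
ZFS_MODULE_PARAM(zfs, zfs_, example_window, UINT, ZMOD_RW,
    "Example window size (illustrative only)");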
#define ZFS_MODULE_PARAM_CALL_IMPL(parent, name, perm, args, desc) \
SYSCTL_DECL(parent); \
- SYSCTL_PROC(parent, OID_AUTO, name, perm | args, desc)
+ SYSCTL_PROC(parent, OID_AUTO, name, CTLFLAG_MPSAFE | perm | args, desc)
#define ZFS_MODULE_PARAM_CALL( \
scope_prefix, name_prefix, name, func, _, perm, desc) \
ZFS_MODULE_PARAM_CALL_IMPL(_vfs_ ## scope_prefix, name, perm, \
func ## _args(name_prefix ## name), desc)
#define ZFS_MODULE_VIRTUAL_PARAM_CALL ZFS_MODULE_PARAM_CALL
#define param_set_arc_long_args(var) \
CTLTYPE_ULONG, &var, 0, param_set_arc_long, "LU"
+#define param_set_arc_int_args(var) \
+ CTLTYPE_INT, &var, 0, param_set_arc_int, "I"
+
#define param_set_arc_min_args(var) \
- CTLTYPE_ULONG, &var, 0, param_set_arc_min, "LU"
+ CTLTYPE_ULONG, NULL, 0, param_set_arc_min, "LU"
#define param_set_arc_max_args(var) \
- CTLTYPE_ULONG, &var, 0, param_set_arc_max, "LU"
+ CTLTYPE_ULONG, NULL, 0, param_set_arc_max, "LU"
-#define param_set_arc_int_args(var) \
- CTLTYPE_INT, &var, 0, param_set_arc_int, "I"
+#define param_set_arc_free_target_args(var) \
+ CTLTYPE_UINT, NULL, 0, param_set_arc_free_target, "IU"
+
+#define param_set_arc_no_grow_shift_args(var) \
+ CTLTYPE_INT, NULL, 0, param_set_arc_no_grow_shift, "I"
#define param_set_deadman_failmode_args(var) \
CTLTYPE_STRING, NULL, 0, param_set_deadman_failmode, "A"
#define param_set_deadman_synctime_args(var) \
CTLTYPE_ULONG, NULL, 0, param_set_deadman_synctime, "LU"
#define param_set_deadman_ziotime_args(var) \
CTLTYPE_ULONG, NULL, 0, param_set_deadman_ziotime, "LU"
#define param_set_multihost_interval_args(var) \
- CTLTYPE_ULONG, &var, 0, param_set_multihost_interval, "LU"
+ CTLTYPE_ULONG, NULL, 0, param_set_multihost_interval, "LU"
#define param_set_slop_shift_args(var) \
- CTLTYPE_INT, &var, 0, param_set_slop_shift, "I"
+ CTLTYPE_INT, NULL, 0, param_set_slop_shift, "I"
#define param_set_min_auto_ashift_args(var) \
- CTLTYPE_U64, &var, 0, param_set_min_auto_ashift, "QU"
+ CTLTYPE_U64, NULL, 0, param_set_min_auto_ashift, "QU"
#define param_set_max_auto_ashift_args(var) \
- CTLTYPE_U64, &var, 0, param_set_max_auto_ashift, "QU"
+ CTLTYPE_U64, NULL, 0, param_set_max_auto_ashift, "QU"
#define fletcher_4_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, fletcher_4_param, "A"
+#define blake3_param_set_args(var) \
+ CTLTYPE_STRING, NULL, 0, blake3_param, "A"
+
#include <sys/kernel.h>
#define module_init(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_init_early(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSINIT(zfs_ ## fn, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_exit(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSUNINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#endif /* SPL_MOD_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd.h
index 53503e838912..3106e4853c70 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd.h
@@ -1,43 +1,47 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
-
#ifndef _FREEBSD_SIMD_H
#define _FREEBSD_SIMD_H
+
#if defined(__amd64__) || defined(__i386__)
#include <sys/simd_x86.h>
-#else
+#elif defined(__powerpc__)
+#include <sys/simd_powerpc.h>
+
+#else
#define kfpu_allowed() 0
#define kfpu_initialize(tsk) do {} while (0)
#define kfpu_begin() do {} while (0)
#define kfpu_end() do {} while (0)
#define kfpu_init() (0)
#define kfpu_fini() do {} while (0)
#endif
+
#endif
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_powerpc.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_powerpc.h
new file mode 100644
index 000000000000..b90240580c7a
--- /dev/null
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_powerpc.h
@@ -0,0 +1,90 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (C) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
+ */
+
+/*
+ * USER API:
+ *
+ * Kernel fpu methods:
+ * kfpu_allowed()
+ * kfpu_begin()
+ * kfpu_end()
+ * kfpu_init()
+ * kfpu_fini()
+ *
+ * SIMD support:
+ *
+ * The following functions should be called to determine whether a CPU
+ * feature is supported. All functions are usable in kernel and user space.
+ * If a SIMD algorithm uses more than one instruction set, all relevant
+ * feature test functions should be called.
+ *
+ * Supported features:
+ * zfs_altivec_available()
+ * zfs_vsx_available()
+ * zfs_isa207_available()
+ */
+
+#ifndef _FREEBSD_SIMD_POWERPC_H
+#define _FREEBSD_SIMD_POWERPC_H
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+
+#include <machine/pcb.h>
+#include <powerpc/cpu.h>
+
+#define kfpu_allowed() 1
+#define kfpu_initialize(tsk) do {} while (0)
+#define kfpu_begin() do {} while (0)
+#define kfpu_end() do {} while (0)
+#define kfpu_init() (0)
+#define kfpu_fini() do {} while (0)
+
+/*
+ * Check if Altivec is available
+ */
+static inline boolean_t
+zfs_altivec_available(void)
+{
+ return ((cpu_features & PPC_FEATURE_HAS_ALTIVEC) != 0);
+}
+
+/*
+ * Check if VSX is available
+ */
+static inline boolean_t
+zfs_vsx_available(void)
+{
+ return ((cpu_features & PPC_FEATURE_HAS_VSX) != 0);
+}
+
+/*
+ * Check if POWER ISA 2.07 is available (SHA2)
+ */
+static inline boolean_t
+zfs_isa207_available(void)
+{
+ return ((cpu_features2 & PPC_FEATURE2_ARCH_2_07) != 0);
+}
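(Editor's note: to make the new PowerPC header's API concrete, a hedged sketch of how a SIMD-accelerated routine would typically gate itself. The checksum function names are placeholders, not functions provided by this change.)

/* Hypothetical caller of the feature-test helpers above (kernel context). */
static void
checksum_block(const void *buf, size_t size)
{
    if (kfpu_allowed() && zfs_vsx_available()) {
        kfpu_begin();
        /* vsx_checksum() is a placeholder for a VSX implementation. */
        vsx_checksum(buf, size);
        kfpu_end();
    } else {
        /* scalar_checksum() is a placeholder fallback. */
        scalar_checksum(buf, size);
    }
}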
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_x86.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_x86.h
index 480bfd28973b..7a0ca243f768 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_x86.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/simd_x86.h
@@ -1,297 +1,297 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/cdefs.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <machine/pcb.h>
#include <x86/x86_var.h>
#include <x86/specialreg.h>
#define kfpu_init() (0)
#define kfpu_fini() do {} while (0)
#define kfpu_allowed() 1
#define kfpu_initialize(tsk) do {} while (0)
#define kfpu_begin() { \
if (__predict_false(!is_fpu_kern_thread(0))) \
fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);\
}
#define kfpu_end() { \
if (__predict_false(curpcb->pcb_flags & PCB_FPUNOSAVE)) \
fpu_kern_leave(curthread, NULL); \
}
/*
* Check if OS supports AVX and AVX2 by checking XCR0
* Only call this function if CPUID indicates that AVX feature is
* supported by the CPU, otherwise it might be an illegal instruction.
*/
static inline uint64_t
xgetbv(uint32_t index)
{
uint32_t eax, edx;
/* xgetbv - instruction byte code */
__asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
: "=a" (eax), "=d" (edx)
: "c" (index));
return ((((uint64_t)edx)<<32) | (uint64_t)eax);
}
/*
* Detect register set support
*/
static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
boolean_t has_osxsave;
uint64_t xcr0;
- has_osxsave = !!(cpu_feature2 & CPUID2_OSXSAVE);
+ has_osxsave = (cpu_feature2 & CPUID2_OSXSAVE) != 0;
if (!has_osxsave)
return (B_FALSE);
xcr0 = xgetbv(0);
return ((xcr0 & state) == state);
}
#define _XSTATE_SSE_AVX (0x2 | 0x4)
#define _XSTATE_AVX512 (0xE0 | _XSTATE_SSE_AVX)
#define __ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define __zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)
/*
* Check if SSE instruction set is available
*/
static inline boolean_t
zfs_sse_available(void)
{
- return (!!(cpu_feature & CPUID_SSE));
+ return ((cpu_feature & CPUID_SSE) != 0);
}
/*
* Check if SSE2 instruction set is available
*/
static inline boolean_t
zfs_sse2_available(void)
{
- return (!!(cpu_feature & CPUID_SSE2));
+ return ((cpu_feature & CPUID_SSE2) != 0);
}
/*
* Check if SSE3 instruction set is available
*/
static inline boolean_t
zfs_sse3_available(void)
{
- return (!!(cpu_feature2 & CPUID2_SSE3));
+ return ((cpu_feature2 & CPUID2_SSE3) != 0);
}
/*
* Check if SSSE3 instruction set is available
*/
static inline boolean_t
zfs_ssse3_available(void)
{
- return (!!(cpu_feature2 & CPUID2_SSSE3));
+ return ((cpu_feature2 & CPUID2_SSSE3) != 0);
}
/*
* Check if SSE4.1 instruction set is available
*/
static inline boolean_t
zfs_sse4_1_available(void)
{
- return (!!(cpu_feature2 & CPUID2_SSE41));
+ return ((cpu_feature2 & CPUID2_SSE41) != 0);
}
/*
* Check if SSE4.2 instruction set is available
*/
static inline boolean_t
zfs_sse4_2_available(void)
{
- return (!!(cpu_feature2 & CPUID2_SSE42));
+ return ((cpu_feature2 & CPUID2_SSE42) != 0);
}
/*
* Check if AVX instruction set is available
*/
static inline boolean_t
zfs_avx_available(void)
{
boolean_t has_avx;
- has_avx = !!(cpu_feature2 & CPUID2_AVX);
+ has_avx = (cpu_feature2 & CPUID2_AVX) != 0;
return (has_avx && __ymm_enabled());
}
/*
* Check if AVX2 instruction set is available
*/
static inline boolean_t
zfs_avx2_available(void)
{
boolean_t has_avx2;
- has_avx2 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX2);
+ has_avx2 = (cpu_stdext_feature & CPUID_STDEXT_AVX2) != 0;
return (has_avx2 && __ymm_enabled());
}
/*
* AVX-512 family of instruction sets:
*
* AVX512F Foundation
* AVX512CD Conflict Detection Instructions
* AVX512ER Exponential and Reciprocal Instructions
* AVX512PF Prefetch Instructions
*
* AVX512BW Byte and Word Instructions
* AVX512DQ Double-word and Quadword Instructions
* AVX512VL Vector Length Extensions
*
* AVX512IFMA Integer Fused Multiply Add (Not supported by kernel 4.4)
* AVX512VBMI Vector Byte Manipulation Instructions
*/
/* Check if AVX512F instruction set is available */
static inline boolean_t
zfs_avx512f_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512CD instruction set is available */
static inline boolean_t
zfs_avx512cd_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_AVX512CD);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_AVX512CD) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512ER instruction set is available */
static inline boolean_t
zfs_avx512er_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_AVX512CD);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_AVX512CD) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512PF instruction set is available */
static inline boolean_t
zfs_avx512pf_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_AVX512PF);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_AVX512PF) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512BW instruction set is available */
static inline boolean_t
zfs_avx512bw_available(void)
{
boolean_t has_avx512 = B_FALSE;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512BW);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512BW) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512DQ instruction set is available */
static inline boolean_t
zfs_avx512dq_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_AVX512DQ);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512VL instruction set is available */
static inline boolean_t
zfs_avx512vl_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_AVX512VL);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_AVX512VL) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512IFMA instruction set is available */
static inline boolean_t
zfs_avx512ifma_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_AVX512IFMA);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_AVX512IFMA) != 0;
return (has_avx512 && __zmm_enabled());
}
/* Check if AVX512VBMI instruction set is available */
static inline boolean_t
zfs_avx512vbmi_available(void)
{
boolean_t has_avx512;
- has_avx512 = !!(cpu_stdext_feature & CPUID_STDEXT_AVX512F) &&
- !!(cpu_stdext_feature & CPUID_STDEXT_BMI1);
+ has_avx512 = (cpu_stdext_feature & CPUID_STDEXT_AVX512F) != 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_BMI1) != 0;
return (has_avx512 && __zmm_enabled());
}
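(Editor's note: the x86 helpers combine a CPUID feature bit with the XCR0 state check, so a consumer needs only one call per instruction set. A hedged dispatch sketch — the fletcher4_* implementation names are placeholders.)

/* Hypothetical dispatcher built on the feature checks above. */
static void
fletcher4_dispatch(const void *buf, uint64_t size)
{
    if (zfs_avx2_available()) {
        kfpu_begin();
        fletcher4_avx2(buf, size);     /* placeholder AVX2 path */
        kfpu_end();
    } else if (zfs_sse2_available()) {
        kfpu_begin();
        fletcher4_sse2(buf, size);     /* placeholder SSE2 path */
        kfpu_end();
    } else {
        fletcher4_scalar(buf, size);   /* placeholder scalar path */
    }
}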
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/timer.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/timer.h
index d4694bb7c09c..7ff77e9b1b74 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/timer.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/timer.h
@@ -1,38 +1,36 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SPL_TIMER_H_
#define _SPL_TIMER_H_
#define ddi_time_after(a, b) ((a) > (b))
#define ddi_time_after64(a, b) ((a) > (b))
#define usleep_range(wakeup, wakeupepsilon) \
pause_sbt("usleep_range", ustosbt(wakeup), \
ustosbt(wakeupepsilon - wakeup), 0)
-
-#define schedule() pause("schedule", 1)
#endif
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h
index 22d57cc473e2..7f163fcfdb1e 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h
@@ -1,125 +1,121 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_VFS_H_
#define _OPENSOLARIS_SYS_VFS_H_
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#define rootdir rootvnode
struct thread;
struct vnode;
typedef struct mount vfs_t;
typedef int umode_t;
#define vfs_flag mnt_flag
#define vfs_data mnt_data
#define vfs_count mnt_ref
#define vfs_fsid mnt_stat.f_fsid
#define vfs_bsize mnt_stat.f_bsize
#define vfs_resource mnt_stat.f_mntfromname
#define v_flag v_vflag
#define v_vfsp v_mount
#define VFS_RDONLY MNT_RDONLY
#define VFS_NOSETUID MNT_NOSUID
#define VFS_NOEXEC MNT_NOEXEC
#define VROOT VV_ROOT
#define XU_NGROUPS 16
/*
* Structure defining a mount option for a filesystem.
* option names are found in mntent.h
*/
typedef struct mntopt {
char *mo_name; /* option name */
char **mo_cancel; /* list of options cancelled by this one */
char *mo_arg; /* argument string for this option */
int mo_flags; /* flags for this mount option */
void *mo_data; /* filesystem specific data */
} mntopt_t;
/*
* Flags that apply to mount options
*/
#define MO_SET 0x01 /* option is set */
#define MO_NODISPLAY 0x02 /* option not listed in mnttab */
#define MO_HASVALUE 0x04 /* option takes a value */
#define MO_IGNORE 0x08 /* option ignored by parser */
#define MO_DEFAULT MO_SET /* option is on by default */
#define MO_TAG 0x10 /* flags a tag set by user program */
#define MO_EMPTY 0x20 /* empty space in option table */
#define VFS_NOFORCEOPT 0x01 /* honor MO_IGNORE (don't set option) */
#define VFS_DISPLAY 0x02 /* Turn off MO_NODISPLAY bit for opt */
#define VFS_NODISPLAY 0x04 /* Turn on MO_NODISPLAY bit for opt */
#define VFS_CREATEOPT 0x08 /* Create the opt if it's not there */
/*
* Structure holding mount option strings for the mounted file system.
*/
typedef struct mntopts {
uint_t mo_count; /* number of entries in table */
mntopt_t *mo_list; /* list of mount options */
} mntopts_t;
void vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
int flags __unused);
void vfs_clearmntopt(vfs_t *vfsp, const char *name);
int vfs_optionisset(const vfs_t *vfsp, const char *opt, char **argp);
int mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype,
char *fspath, char *fspec, int fsflags);
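A short sketch of how the option helpers declared above are typically consumed; the "atime" option name is illustrative only.

/* Sketch: query a named mount option; returns non-zero if it is set. */
static int
example_wants_atime(vfs_t *vfsp)
{
	return (vfs_optionisset(vfsp, "atime", NULL));
}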
typedef uint64_t vfs_feature_t;
#define VFSFT_XVATTR 0x100000001 /* Supports xvattr for attrs */
#define VFSFT_CASEINSENSITIVE 0x100000002 /* Supports case-insensitive */
#define VFSFT_NOCASESENSITIVE 0x100000004 /* NOT case-sensitive */
#define VFSFT_DIRENTFLAGS 0x100000008 /* Supports dirent flags */
#define VFSFT_ACLONCREATE 0x100000010 /* Supports ACL on create */
#define VFSFT_ACEMASKONACCESS 0x100000020 /* Can use ACEMASK for access */
#define VFSFT_SYSATTR_VIEWS 0x100000040 /* Supports sysattr view i/f */
#define VFSFT_ACCESS_FILTER 0x100000080 /* dirents filtered by access */
#define VFSFT_REPARSE 0x100000100 /* Supports reparse point */
#define VFSFT_ZEROCOPY_SUPPORTED 0x100000200
/* Supports loaning/returning cache buffers */
-#define vfs_set_feature(vfsp, feature) do { } while (0)
-#define vfs_clear_feature(vfsp, feature) do { } while (0)
-#define vfs_has_feature(vfsp, feature) (0)
-
#include <sys/mount.h>
#endif /* _OPENSOLARIS_SYS_VFS_H_ */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode_impl.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode_impl.h
index 3e698d7ac92a..4e04b5e80a06 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode_impl.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode_impl.h
@@ -1,268 +1,255 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2017 RackTop Systems.
*/
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* University Copyright- Copyright (c) 1982, 1986, 1988
* The Regents of the University of California
* All Rights Reserved
*
* University Acknowledgment- Portions of this document are derived from
* software developed by the University of California, Berkeley, and its
* contributors.
*/
#ifndef _SYS_VNODE_IMPL_H
#define _SYS_VNODE_IMPL_H
#define IS_DEVVP(vp) \
((vp)->v_type == VCHR || (vp)->v_type == VBLK || (vp)->v_type == VFIFO)
-#define V_XATTRDIR 0x0000 /* attribute unnamed directory */
-
#define AV_SCANSTAMP_SZ 32 /* length of anti-virus scanstamp */
/*
* The xvattr structure is really a variable length structure that
* is made up of:
* - The classic vattr_t (xva_vattr)
* - a 32 bit quantity (xva_mapsize) that specifies the size of the
* attribute bitmaps in 32 bit words.
* - A pointer to the returned attribute bitmap (needed because the
* previous element, the requested attribute bitmap, is variable length).
* - The requested attribute bitmap, which is an array of 32 bit words.
* Callers use the XVA_SET_REQ() macro to set the bits corresponding to
* the attributes that are being requested.
* - The returned attribute bitmap, which is an array of 32 bit words.
* File systems that support optional attributes use the XVA_SET_RTN()
* macro to set the bits corresponding to the attributes that are being
* returned.
* - The xoptattr_t structure which contains the attribute values
*
* xva_mapsize determines how many words are in the attribute bitmaps.
* Immediately following the attribute bitmaps is the xoptattr_t.
* xva_getxoptattr() is used to get the pointer to the xoptattr_t
* section.
*/
#define XVA_MAPSIZE 3 /* Size of attr bitmaps */
#define XVA_MAGIC 0x78766174 /* Magic # for verification */
/*
* The xvattr structure is an extensible structure which permits optional
* attributes to be requested/returned. File systems may or may not support
* optional attributes. They do so at their own discretion but if they do
* support optional attributes, they must register the VFSFT_XVATTR feature
* so that the optional attributes can be set/retrieved.
*
* The fields of the xvattr structure are:
*
* xva_vattr - The first element of an xvattr is a legacy vattr structure
* which includes the common attributes. If AT_XVATTR is set in the va_mask
* then the entire structure is treated as an xvattr. If AT_XVATTR is not
* set, then only the xva_vattr structure can be used.
*
* xva_magic - 0x78766174 (hex for "xvat"). Magic number for verification.
*
* xva_mapsize - Size of requested and returned attribute bitmaps.
*
* xva_rtnattrmapp - Pointer to xva_rtnattrmap[]. We need this since the
* size of the array before it, xva_reqattrmap[], could change which means
* the location of xva_rtnattrmap[] could change. This will allow unbundled
* file systems to find the location of xva_rtnattrmap[] when the sizes change.
*
* xva_reqattrmap[] - Array of requested attributes. Attributes are
* represented by a specific bit in a specific element of the attribute
* map array. Callers set the bits corresponding to the attributes
* that the caller wants to get/set.
*
* xva_rtnattrmap[] - Array of attributes that the file system was able to
* process. Not all file systems support all optional attributes. This map
* informs the caller which attributes the underlying file system was able
* to set/get. (Same structure as the requested attributes array in terms
* of each attribute corresponding to specific bits and array elements.)
*
* xva_xoptattrs - Structure containing values of optional attributes.
* These values are only valid if the corresponding bits in xva_reqattrmap
* are set and the underlying file system supports those attributes.
*/
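A hedged sketch of the request/return protocol described above. The helpers xva_init(), XVA_SET_REQ(), XVA_ISSET_RTN() and xva_getxoptattr(), and the xoa_readonly field, are assumed from the common OpenZFS xvattr headers; they are not part of this diff.

static void
example_request_readonly(xvattr_t *xvap)
{
	xoptattr_t *xoap;

	xva_init(xvap);
	XVA_SET_REQ(xvap, XAT_READONLY);
	/* ... hand xvap->xva_vattr to the getattr path ... */
	if (XVA_ISSET_RTN(xvap, XAT_READONLY)) {
		xoap = xva_getxoptattr(xvap);
		/* xoap->xoa_readonly now holds the returned value */
		(void) xoap;
	}
}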
/*
* Attribute bits used in the extensible attribute's (xva's) attribute
* bitmaps. Note that the bitmaps are made up of a variable length number
* of 32-bit words. The convention is to use XAT{n}_{attrname} where "n"
* is the element in the bitmap (starting at 1). This convention is for
* the convenience of the maintainer to keep track of which element each
* attribute belongs to.
*
* NOTE THAT CONSUMERS MUST *NOT* USE THE XATn_* DEFINES DIRECTLY. CONSUMERS
* MUST USE THE XAT_* DEFINES.
*/
#define XAT0_INDEX 0LL /* Index into bitmap for XAT0 attrs */
#define XAT0_CREATETIME 0x00000001 /* Create time of file */
#define XAT0_ARCHIVE 0x00000002 /* Archive */
#define XAT0_SYSTEM 0x00000004 /* System */
#define XAT0_READONLY 0x00000008 /* Readonly */
#define XAT0_HIDDEN 0x00000010 /* Hidden */
#define XAT0_NOUNLINK 0x00000020 /* Nounlink */
#define XAT0_IMMUTABLE 0x00000040 /* immutable */
#define XAT0_APPENDONLY 0x00000080 /* appendonly */
#define XAT0_NODUMP 0x00000100 /* nodump */
#define XAT0_OPAQUE 0x00000200 /* opaque */
#define XAT0_AV_QUARANTINED 0x00000400 /* anti-virus quarantine */
#define XAT0_AV_MODIFIED 0x00000800 /* anti-virus modified */
#define XAT0_AV_SCANSTAMP 0x00001000 /* anti-virus scanstamp */
#define XAT0_REPARSE 0x00002000 /* FS reparse point */
#define XAT0_GEN 0x00004000 /* object generation number */
#define XAT0_OFFLINE 0x00008000 /* offline */
#define XAT0_SPARSE 0x00010000 /* sparse */
/* Support for XAT_* optional attributes */
#define XVA_MASK 0xffffffff /* Used to mask off 32 bits */
#define XVA_SHFT 32 /* Used to shift index */
/*
* Used to pry out the index and attribute bits from the XAT_* attributes
* defined below. Note that we're masking things down to 32 bits then
* casting to uint32_t.
*/
#define XVA_INDEX(attr) ((uint32_t)(((attr) >> XVA_SHFT) & XVA_MASK))
#define XVA_ATTRBIT(attr) ((uint32_t)((attr) & XVA_MASK))
/*
* The following defines present a "flat namespace" so that consumers don't
* need to keep track of which element belongs to which bitmap entry.
*
* NOTE THAT THESE MUST NEVER BE OR-ed TOGETHER
*/
#define XAT_CREATETIME ((XAT0_INDEX << XVA_SHFT) | XAT0_CREATETIME)
#define XAT_ARCHIVE ((XAT0_INDEX << XVA_SHFT) | XAT0_ARCHIVE)
#define XAT_SYSTEM ((XAT0_INDEX << XVA_SHFT) | XAT0_SYSTEM)
#define XAT_READONLY ((XAT0_INDEX << XVA_SHFT) | XAT0_READONLY)
#define XAT_HIDDEN ((XAT0_INDEX << XVA_SHFT) | XAT0_HIDDEN)
#define XAT_NOUNLINK ((XAT0_INDEX << XVA_SHFT) | XAT0_NOUNLINK)
#define XAT_IMMUTABLE ((XAT0_INDEX << XVA_SHFT) | XAT0_IMMUTABLE)
#define XAT_APPENDONLY ((XAT0_INDEX << XVA_SHFT) | XAT0_APPENDONLY)
#define XAT_NODUMP ((XAT0_INDEX << XVA_SHFT) | XAT0_NODUMP)
#define XAT_OPAQUE ((XAT0_INDEX << XVA_SHFT) | XAT0_OPAQUE)
#define XAT_AV_QUARANTINED ((XAT0_INDEX << XVA_SHFT) | XAT0_AV_QUARANTINED)
#define XAT_AV_MODIFIED ((XAT0_INDEX << XVA_SHFT) | XAT0_AV_MODIFIED)
#define XAT_AV_SCANSTAMP ((XAT0_INDEX << XVA_SHFT) | XAT0_AV_SCANSTAMP)
#define XAT_REPARSE ((XAT0_INDEX << XVA_SHFT) | XAT0_REPARSE)
#define XAT_GEN ((XAT0_INDEX << XVA_SHFT) | XAT0_GEN)
#define XAT_OFFLINE ((XAT0_INDEX << XVA_SHFT) | XAT0_OFFLINE)
#define XAT_SPARSE ((XAT0_INDEX << XVA_SHFT) | XAT0_SPARSE)
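A worked example of the encoding, derived directly from the defines above: XAT_READONLY is (0LL << 32) | 0x00000008, so XVA_INDEX() selects bitmap word 0 and XVA_ATTRBIT() recovers bit 0x00000008.

/* Illustrative compile-time checks of the index/bit split. */
_Static_assert(XVA_INDEX(XAT_READONLY) == 0, "XAT_READONLY is in bitmap word 0");
_Static_assert(XVA_ATTRBIT(XAT_READONLY) == 0x00000008, "XAT_READONLY is bit 3");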
/*
* The returned attribute map array (xva_rtnattrmap[]) is located past the
* requested attribute map array (xva_reqattrmap[]). Its location changes
* when the array sizes change. We use a separate pointer in a known location
* (xva_rtnattrmapp) to hold the location of xva_rtnattrmap[]. This is
* set in xva_init()
*/
#define XVA_RTNATTRMAP(xvap) ((xvap)->xva_rtnattrmapp)
#define MODEMASK 07777 /* mode bits plus permission bits */
#define PERMMASK 00777 /* permission bits */
-/*
- * VOP_ACCESS flags
- */
-#define V_ACE_MASK 0x1 /* mask represents NFSv4 ACE permissions */
-
/*
* Flags for vnode operations.
*/
enum rm { RMFILE, RMDIRECTORY }; /* rm or rmdir (remove) */
enum create { CRCREAT, CRMKNOD, CRMKDIR }; /* reason for create */
/*
* Structure used by various vnode operations to determine
* the context (pid, host, identity) of a caller.
*
* The cc_caller_id is used to identify one or more callers who invoke
* operations, possibly on behalf of others. For example, the NFS
* server could have its own cc_caller_id which can be detected by
* vnode/vfs operations or (FEM) monitors on those operations. New
* caller IDs are generated by fs_new_caller_id().
*/
typedef struct caller_context {
pid_t cc_pid; /* Process ID of the caller */
int cc_sysid; /* System ID, used for remote calls */
u_longlong_t cc_caller_id; /* Identifier for (set of) caller(s) */
ulong_t cc_flags;
} caller_context_t;
struct taskq;
/*
* Flags for VOP_LOOKUP
*
* FIGNORECASE and FSEARCH, defined in file.h, are also possible.
*
*/
#define LOOKUP_DIR 0x01 /* want parent dir vp */
#define LOOKUP_XATTR 0x02 /* lookup up extended attr dir */
#define CREATE_XATTR_DIR 0x04 /* Create extended attr dir */
#define LOOKUP_HAVE_SYSATTR_DIR 0x08 /* Already created virtual GFS dir */
-/*
- * Flags for VOP_READDIR
- */
-#define V_RDDIR_ENTFLAGS 0x01 /* request dirent flags */
-#define V_RDDIR_ACCFILTER 0x02 /* filter out inaccessible dirents */
-
/*
* Public vnode manipulation functions.
*/
void vn_rele_async(struct vnode *vp, struct taskq *taskq);
#define VN_RELE_ASYNC(vp, taskq) { \
vn_rele_async(vp, taskq); \
}
/*
* Flags to VOP_SETATTR/VOP_GETATTR.
*/
#define ATTR_UTIME 0x01 /* non-default utime(2) request */
#define ATTR_EXEC 0x02 /* invocation from exec(2) */
#define ATTR_COMM 0x04 /* yield common vp attributes */
#define ATTR_HINT 0x08 /* information returned will be `hint' */
#define ATTR_REAL 0x10 /* yield attributes of the real vp */
#define ATTR_NOACLCHECK 0x20 /* Don't check ACL when checking permissions */
#define ATTR_TRIGGER 0x40 /* Mount first if vnode is a trigger mount */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VNODE_IMPL_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/arc_os.h
similarity index 75%
copy from sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
copy to sys/contrib/openzfs/include/os/freebsd/zfs/sys/arc_os.h
index 2be1b76e4334..a95618b91fed 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/arc_os.h
@@ -1,36 +1,34 @@
/*
- * Copyright (c) 2013 Andriy Gapon
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Martin Matuska
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
- * $FreeBSD$
*/
-#ifndef _OPENSOLARIS_SYS_DISP_H_
-#define _OPENSOLARIS_SYS_DISP_H_
-
-#include <sys/proc.h>
+#ifndef _SYS_ARC_OS_H
+#define _SYS_ARC_OS_H
-#define kpreempt(x) kern_yield(PRI_USER)
+int param_set_arc_free_target(SYSCTL_HANDLER_ARGS);
+int param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS);
-#endif /* _OPENSOLARIS_SYS_DISP_H_ */
+#endif
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/freebsd_event.h
similarity index 74%
copy from sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
copy to sys/contrib/openzfs/include/os/freebsd/zfs/sys/freebsd_event.h
index 2be1b76e4334..544ff8b0f81f 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/freebsd_event.h
@@ -1,36 +1,37 @@
/*
- * Copyright (c) 2013 Andriy Gapon
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Rob Wing
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
- * $FreeBSD$
*/
-#ifndef _OPENSOLARIS_SYS_DISP_H_
-#define _OPENSOLARIS_SYS_DISP_H_
+#ifndef _ZFS_FREEBSD_EVENT_H
+#define _ZFS_FREEBSD_EVENT_H
+
+#ifdef _KERNEL
-#include <sys/proc.h>
+void knlist_init_sx(struct knlist *knl, struct sx *lock);
-#define kpreempt(x) kern_yield(PRI_USER)
+#endif /* !_KERNEL */
-#endif /* _OPENSOLARIS_SYS_DISP_H_ */
+#endif /* !_ZFS_FREEBSD_EVENT_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h
index 867199501396..1ce72330412c 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h
@@ -1,90 +1,88 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef ZFS_CONTEXT_OS_H_
#define ZFS_CONTEXT_OS_H_
#include <sys/condvar.h>
#include <sys/rwlock.h>
#include <sys/sig.h>
#include_next <sys/sdt.h>
#include <sys/misc.h>
#include <sys/kdb.h>
#include <sys/pathname.h>
#include <sys/conf.h>
#include <sys/types.h>
#include <sys/ccompat.h>
#include <linux/types.h>
#if KSTACK_PAGES * PAGE_SIZE >= 16384
#define HAVE_LARGE_STACKS 1
#endif
-#define cond_resched() kern_yield(PRI_USER)
-
#define taskq_create_sysdc(a, b, d, e, p, dc, f) \
((void) sizeof (dc), taskq_create(a, b, maxclsyspri, d, e, f))
#define tsd_create(keyp, destructor) do { \
*(keyp) = osd_thread_register((destructor)); \
KASSERT(*(keyp) > 0, ("cannot register OSD")); \
} while (0)
#define tsd_destroy(keyp) osd_thread_deregister(*(keyp))
#define tsd_get(key) osd_thread_get(curthread, (key))
#define tsd_set(key, value) osd_thread_set(curthread, (key), (value))
#define fm_panic panic
extern int zfs_debug_level;
extern struct mtx zfs_debug_mtx;
#define ZFS_LOG(lvl, ...) do { \
if (((lvl) & 0xff) <= zfs_debug_level) { \
mtx_lock(&zfs_debug_mtx); \
printf("%s:%u[%d]: ", \
__func__, __LINE__, (lvl)); \
printf(__VA_ARGS__); \
printf("\n"); \
if ((lvl) & 0x100) \
kdb_backtrace(); \
mtx_unlock(&zfs_debug_mtx); \
} \
} while (0)
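A usage sketch of the macro above: the low byte is the verbosity level compared against zfs_debug_level, and OR-ing in 0x100 additionally dumps a kernel backtrace.

static void
example_log(void)
{
	ZFS_LOG(1, "example: pool %s imported", "tank");
	ZFS_LOG(0x100 | 1, "example: unexpected state %d", 42);
}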
#define MSEC_TO_TICK(msec) (howmany((hrtime_t)(msec) * hz, MILLISEC))
extern int hz;
extern int tick;
typedef int fstrans_cookie_t;
#define spl_fstrans_mark() (0)
#define spl_fstrans_unmark(x) (x = 0)
#define signal_pending(x) SIGPENDING(x)
#define current curthread
#define thread_join(x)
typedef struct opensolaris_utsname utsname_t;
extern utsname_t *utsname(void);
extern int spa_import_rootpool(const char *name, bool checkpointrewind);
#endif
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h
index c44f7c6f06b1..f765d38dbac8 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h
@@ -1,317 +1,311 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VFSOPS_H
#define _SYS_FS_ZFS_VFSOPS_H
#if __FreeBSD_version >= 1300125
#define TEARDOWN_RMS
#endif
#if __FreeBSD_version >= 1300109
#define TEARDOWN_INACTIVE_RMS
#endif
#include <sys/dataset_kstats.h>
#include <sys/list.h>
#include <sys/vfs.h>
#include <sys/zil.h>
#include <sys/sa.h>
#include <sys/rrwlock.h>
#ifdef TEARDOWN_INACTIVE_RMS
#include <sys/rmlock.h>
#endif
#include <sys/zfs_ioctl.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef TEARDOWN_RMS
typedef struct rmslock zfs_teardown_lock_t;
#else
#define zfs_teardown_lock_t rrmlock_t
#endif
#ifdef TEARDOWN_INACTIVE_RMS
typedef struct rmslock zfs_teardown_inactive_lock_t;
#else
#define zfs_teardown_inactive_lock_t krwlock_t
#endif
typedef struct zfsvfs zfsvfs_t;
struct znode;
struct zfsvfs {
vfs_t *z_vfs; /* generic fs struct */
zfsvfs_t *z_parent; /* parent fs */
objset_t *z_os; /* objset reference */
uint64_t z_flags; /* super_block flags */
uint64_t z_root; /* id of root znode */
uint64_t z_unlinkedobj; /* id of unlinked zapobj */
uint64_t z_max_blksz; /* maximum block size for files */
uint64_t z_fuid_obj; /* fuid table object number */
uint64_t z_fuid_size; /* fuid table size */
avl_tree_t z_fuid_idx; /* fuid tree keyed by index */
avl_tree_t z_fuid_domain; /* fuid tree keyed by domain */
krwlock_t z_fuid_lock; /* fuid lock */
boolean_t z_fuid_loaded; /* fuid tables are loaded */
boolean_t z_fuid_dirty; /* need to sync fuid table ? */
struct zfs_fuid_info *z_fuid_replay; /* fuid info for replay */
zilog_t *z_log; /* intent log pointer */
uint_t z_acl_type; /* type of acl usable on this fs */
uint_t z_acl_mode; /* acl chmod/mode behavior */
uint_t z_acl_inherit; /* acl inheritance behavior */
zfs_case_t z_case; /* case-sense */
boolean_t z_utf8; /* utf8-only */
int z_norm; /* normalization flags */
boolean_t z_atime; /* enable atimes mount option */
boolean_t z_unmounted; /* unmounted */
zfs_teardown_lock_t z_teardown_lock;
zfs_teardown_inactive_lock_t z_teardown_inactive_lock;
list_t z_all_znodes; /* all vnodes in the fs */
uint64_t z_nr_znodes; /* number of znodes in the fs */
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
struct zfsctl_root *z_ctldir; /* .zfs directory pointer */
boolean_t z_show_ctldir; /* expose .zfs in the root dir */
boolean_t z_issnap; /* true if this is a snapshot */
boolean_t z_use_fuids; /* version allows fuids */
boolean_t z_replay; /* set during ZIL replay */
boolean_t z_use_sa; /* version allows system attributes */
boolean_t z_xattr_sa; /* allow xattrs to be stored as SA */
boolean_t z_use_namecache; /* make use of FreeBSD name cache */
uint8_t z_xattr; /* xattr type in use */
uint64_t z_version; /* ZPL version */
uint64_t z_shares_dir; /* hidden shares dir */
dataset_kstats_t z_kstat; /* fs kstats */
kmutex_t z_lock;
uint64_t z_userquota_obj;
uint64_t z_groupquota_obj;
uint64_t z_userobjquota_obj;
uint64_t z_groupobjquota_obj;
uint64_t z_projectquota_obj;
uint64_t z_projectobjquota_obj;
uint64_t z_replay_eof; /* New end of file - replay only */
sa_attr_type_t *z_attr_table; /* SA attr mapping->id */
#define ZFS_OBJ_MTX_SZ 64
kmutex_t z_hold_mtx[ZFS_OBJ_MTX_SZ]; /* znode hold locks */
struct task z_unlinked_drain_task;
};
#ifdef TEARDOWN_RMS
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_lock, "zfs teardown")
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rms_destroy(&(zfsvfs)->z_teardown_lock)
-#define ZFS_TEARDOWN_TRY_ENTER_READ(zfsvfs) \
- rms_try_rlock(&(zfsvfs)->z_teardown_lock)
-
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rms_rlock(&(zfsvfs)->z_teardown_lock);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rms_runlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rms_wlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rms_wunlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rms_unlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
rms_rowned(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
rms_wowned(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
rms_owned_any(&(zfsvfs)->z_teardown_lock)
#else
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
-#define ZFS_TEARDOWN_TRY_ENTER_READ(zfsvfs) \
- rw_tryenter(&(zfsvfs)->z_teardown_lock, RW_READER)
-
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#endif
#ifdef TEARDOWN_INACTIVE_RMS
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_inactive_lock, "zfs teardown inactive")
#define ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs) \
rms_destroy(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs) \
rms_try_rlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs) \
rms_rlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs) \
rms_runlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs) \
rms_wlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs) \
rms_wunlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs) \
rms_wowned(&(zfsvfs)->z_teardown_inactive_lock)
#else
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rw_init(&(zfsvfs)->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL)
#define ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs) \
rw_destroy(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs) \
rw_tryenter(&(zfsvfs)->z_teardown_inactive_lock, RW_READER)
#define ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs) \
rw_enter(&(zfsvfs)->z_teardown_inactive_lock, RW_READER)
#define ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs) \
rw_exit(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs) \
rw_enter(&(zfsvfs)->z_teardown_inactive_lock, RW_WRITER)
#define ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs) \
rw_exit(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs) \
RW_WRITE_HELD(&(zfsvfs)->z_teardown_inactive_lock)
#endif
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
/*
* Normal filesystems (those not under .zfs/snapshot) have a total
* file ID size limited to 12 bytes (including the length field) due to
* NFSv2 protocol's limitation of 32 bytes for a filehandle. For historical
* reasons, this same limit is being imposed by the Solaris NFSv3 implementation
* (although the NFSv3 protocol actually permits a maximum of 64 bytes). It
* is not possible to expand beyond 12 bytes without abandoning support
* of NFSv2.
*
* For normal filesystems, we partition up the available space as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
*
* We reserve only 48 bits for the object number, as this is the limit
* currently defined and imposed by the DMU.
*/
typedef struct zfid_short {
uint16_t zf_len;
uint8_t zf_object[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_gen[4]; /* gen[i] = gen >> (8 * i) */
} zfid_short_t;
/*
* Filesystems under .zfs/snapshot have a total file ID size of 22[*] bytes
* (including the length field). This makes files under .zfs/snapshot
* accessible by NFSv3 and NFSv4, but not NFSv2.
*
* For files under .zfs/snapshot, we partition up the available space
* as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
* 6 bytes objset id (48 bits)
* 4 bytes[**] currently just zero (32 bits)
*
* We reserve only 48 bits for the object number and objset id, as these are
* the limits currently defined and imposed by the DMU.
*
* [*] 20 bytes on FreeBSD to fit into the size of struct fid.
* [**] 2 bytes on FreeBSD for the above reason.
*/
typedef struct zfid_long {
zfid_short_t z_fid;
uint8_t zf_setid[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_setgen[2]; /* gen[i] = gen >> (8 * i) */
} zfid_long_t;
#define SHORT_FID_LEN (sizeof (zfid_short_t) - sizeof (uint16_t))
#define LONG_FID_LEN (sizeof (zfid_long_t) - sizeof (uint16_t))
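A minimal sketch of the byte-wise encoding the comments above describe (the real work is done by the ZPL's fid code); the helper name is hypothetical.

static void
example_fill_short_fid(zfid_short_t *zfid, uint64_t object, uint32_t gen)
{
	int i;

	zfid->zf_len = SHORT_FID_LEN;
	for (i = 0; i < (int)sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
	for (i = 0; i < (int)sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)((uint64_t)gen >> (8 * i));
}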
extern uint_t zfs_fsyncer_key;
extern int zfs_super_owner;
extern void zfs_init(void);
extern void zfs_fini(void);
extern int zfs_suspend_fs(zfsvfs_t *zfsvfs);
extern int zfs_resume_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_end_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
extern int zfsvfs_create(const char *name, boolean_t readonly, zfsvfs_t **zfvp);
extern int zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os);
extern void zfsvfs_free(zfsvfs_t *zfsvfs);
extern int zfs_check_global_label(const char *dsname, const char *hexsl);
extern boolean_t zfs_is_readonly(zfsvfs_t *zfsvfs);
extern int zfs_get_temporary_prop(struct dsl_dataset *ds, zfs_prop_t zfs_prop,
uint64_t *val, char *setpoint);
extern int zfs_busy(void);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_VFSOPS_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h
index f76a841472f2..41a5bb218c12 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h
@@ -1,190 +1,185 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _FREEBSD_ZFS_SYS_ZNODE_IMPL_H
#define _FREEBSD_ZFS_SYS_ZNODE_IMPL_H
#include <sys/list.h>
#include <sys/dmu.h>
#include <sys/sa.h>
#include <sys/zfs_vfsops.h>
#include <sys/rrwlock.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_acl.h>
#include <sys/zil.h>
#include <sys/zfs_project.h>
#include <vm/vm_object.h>
#include <sys/uio.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Directory entry locks control access to directory entries.
* They are used to protect creates, deletes, and renames.
* Each directory znode has a mutex and a list of locked names.
*/
#define ZNODE_OS_FIELDS \
struct zfsvfs *z_zfsvfs; \
vnode_t *z_vnode; \
char *z_cached_symlink; \
uint64_t z_uid; \
uint64_t z_gid; \
uint64_t z_gen; \
uint64_t z_atime[2]; \
uint64_t z_links;
#define ZFS_LINK_MAX UINT64_MAX
/*
* ZFS minor numbers can refer to either a control device instance or
* a zvol. Depending on the value of zss_type, zss_data points to either
* a zvol_state_t or a zfs_onexit_t.
*/
enum zfs_soft_state_type {
ZSST_ZVOL,
ZSST_CTLDEV
};
typedef struct zfs_soft_state {
enum zfs_soft_state_type zss_type;
void *zss_data;
} zfs_soft_state_t;
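A small sketch of dispatching on the soft-state type described above; the helper is hypothetical.

static void *
example_get_zvol_state(zfs_soft_state_t *zss)
{
	if (zss->zss_type == ZSST_ZVOL)
		return (zss->zss_data);	/* zvol_state_t * */
	return (NULL);			/* ZSST_CTLDEV carries a zfs_onexit_t * */
}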
/*
* Range locking rules
* --------------------
* 1. When truncating a file (zfs_create, zfs_setattr, zfs_space) the whole
* file range needs to be locked as RL_WRITER. Only then can the pages be
* freed etc and zp_size reset. zp_size must be set within range lock.
* 2. For writes and punching holes (zfs_write & zfs_space) just the range
* being written or freed needs to be locked as RL_WRITER.
* Multiple writes at the end of the file must coordinate zp_size updates
* to ensure data isn't lost. A compare and swap loop is currently used
* to ensure the file size is at least the offset last written.
* 3. For reads (zfs_read, zfs_get_data & zfs_putapage) just the range being
* read needs to be locked as RL_READER. A check against zp_size can then
* be made for reading beyond end of file.
*/
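A hedged sketch of rule 1 above. zfs_rangelock_enter()/zfs_rangelock_exit() and the znode's z_rangelock field come from the common OpenZFS range-lock code and are assumed here, not shown in this diff.

static void
example_truncate_lock(znode_t *zp)
{
	zfs_locked_range_t *lr;

	/* Truncation locks the whole file as RL_WRITER before touching the size. */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
	/* ... free pages and reset the file size under the lock ... */
	zfs_rangelock_exit(lr);
}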
/*
* Convert between znode pointers and vnode pointers
*/
#define ZTOV(ZP) ((ZP)->z_vnode)
#define ZTOI(ZP) ((ZP)->z_vnode)
#define VTOZ(VP) ((struct znode *)(VP)->v_data)
#define VTOZ_SMR(VP) ((znode_t *)vn_load_v_data_smr(VP))
#define ITOZ(VP) ((struct znode *)(VP)->v_data)
#define zhold(zp) vhold(ZTOV((zp)))
#define zrele(zp) vrele(ZTOV((zp)))
#define ZTOZSB(zp) ((zp)->z_zfsvfs)
#define ITOZSB(vp) (VTOZ(vp)->z_zfsvfs)
#define ZTOTYPE(zp) (ZTOV(zp)->v_type)
#define ZTOGID(zp) ((zp)->z_gid)
#define ZTOUID(zp) ((zp)->z_uid)
#define ZTONLNK(zp) ((zp)->z_links)
#define Z_ISBLK(type) ((type) == VBLK)
#define Z_ISCHR(type) ((type) == VCHR)
#define Z_ISLNK(type) ((type) == VLNK)
#define Z_ISDIR(type) ((type) == VDIR)
#define zn_has_cached_data(zp) vn_has_cached_data(ZTOV(zp))
#define zn_flush_cached_data(zp, sync) vn_flush_cached_data(ZTOV(zp), sync)
#define zn_rlimit_fsize(zp, uio) \
vn_rlimit_fsize(ZTOV(zp), GET_UIO_STRUCT(uio), zfs_uio_td(uio))
-#define ZFS_ENTER_ERROR(zfsvfs, error) do { \
- ZFS_TEARDOWN_ENTER_READ((zfsvfs), FTAG); \
- if (__predict_false((zfsvfs)->z_unmounted)) { \
- ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG); \
- return (error); \
- } \
-} while (0)
-
/* Called on entry to each ZFS vnode and vfs operation */
-#define ZFS_ENTER(zfsvfs) ZFS_ENTER_ERROR(zfsvfs, EIO)
+static inline int
+zfs_enter(zfsvfs_t *zfsvfs, const char *tag)
+{
+ ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag);
+ if (__predict_false((zfsvfs)->z_unmounted)) {
+ ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag);
+ return (SET_ERROR(EIO));
+ }
+ return (0);
+}
/* Must be called before exiting the vop */
-#define ZFS_EXIT(zfsvfs) ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG)
-
-#define ZFS_VERIFY_ZP_ERROR(zp, error) do { \
- if (__predict_false((zp)->z_sa_hdl == NULL)) { \
- ZFS_EXIT((zp)->z_zfsvfs); \
- return (error); \
- } \
-} while (0)
-
-/* Verifies the znode is valid */
-#define ZFS_VERIFY_ZP(zp) ZFS_VERIFY_ZP_ERROR(zp, EIO)
+static inline void
+zfs_exit(zfsvfs_t *zfsvfs, const char *tag)
+{
+ ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag);
+}
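The intended calling pattern for the new helpers, sketched with a hypothetical vnode operation (FTAG is the usual SPL tag for the current function):

static int
example_vop(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
		return (error);
	/* ... perform the actual operation ... */
	zfs_exit(zfsvfs, FTAG);
	return (0);
}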
/*
* Macros for dealing with dmu_buf_hold
*/
#define ZFS_OBJ_HASH(obj_num) ((obj_num) & (ZFS_OBJ_MTX_SZ - 1))
#define ZFS_OBJ_MUTEX(zfsvfs, obj_num) \
(&(zfsvfs)->z_hold_mtx[ZFS_OBJ_HASH(obj_num)])
#define ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num) \
mutex_enter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num)))
#define ZFS_OBJ_HOLD_TRYENTER(zfsvfs, obj_num) \
mutex_tryenter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num)))
#define ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num) \
mutex_exit(ZFS_OBJ_MUTEX((zfsvfs), (obj_num)))
/* Encode ZFS stored time values from a struct timespec */
#define ZFS_TIME_ENCODE(tp, stmp) \
{ \
(stmp)[0] = (uint64_t)(tp)->tv_sec; \
(stmp)[1] = (uint64_t)(tp)->tv_nsec; \
}
/* Decode ZFS stored time values to a struct timespec */
#define ZFS_TIME_DECODE(tp, stmp) \
{ \
(tp)->tv_sec = (time_t)(stmp)[0]; \
(tp)->tv_nsec = (long)(stmp)[1]; \
}
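A quick round-trip sketch of the two macros above:

static void
example_time_roundtrip(const struct timespec *in, struct timespec *out)
{
	uint64_t stmp[2];

	ZFS_TIME_ENCODE(in, stmp);	/* stmp[0] = seconds, stmp[1] = nanoseconds */
	ZFS_TIME_DECODE(out, stmp);
}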
#define ZFS_ACCESSTIME_STAMP(zfsvfs, zp) \
if ((zfsvfs)->z_atime && !((zfsvfs)->z_vfs->vfs_flag & VFS_RDONLY)) \
zfs_tstamp_update_setup_ext(zp, ACCESSED, NULL, NULL, B_FALSE);
extern void zfs_tstamp_update_setup_ext(struct znode *,
uint_t, uint64_t [2], uint64_t [2], boolean_t have_tx);
extern void zfs_znode_free(struct znode *);
extern zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE];
extern int zfs_znode_parent_and_name(struct znode *zp, struct znode **dzpp,
char *buf);
#ifdef __cplusplus
}
#endif
#endif /* _FREEBSD_ZFS_SYS_ZNODE_IMPL_H */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_powerpc.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_powerpc.h
index 764c5dc51f95..2a2f92bc499d 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_powerpc.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_powerpc.h
@@ -1,129 +1,124 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2019 Romain Dolbeau
* <romain.dolbeau@european-processor-initiative.eu>
+ * Copyright (C) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
/*
* USER API:
*
* Kernel fpu methods:
* kfpu_allowed()
* kfpu_begin()
* kfpu_end()
* kfpu_init()
* kfpu_fini()
*
* SIMD support:
*
* The following functions should be called to determine whether a CPU
* feature is supported. All functions are usable in kernel and user space.
* If a SIMD algorithm uses more than one instruction set,
* all relevant feature test functions should be called.
*
* Supported features:
- * zfs_altivec_available()
+ * zfs_altivec_available()
+ * zfs_vsx_available()
+ * zfs_isa207_available()
*/
#ifndef _LINUX_SIMD_POWERPC_H
#define _LINUX_SIMD_POWERPC_H
/* only for __powerpc__ */
#if defined(__powerpc__)
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/switch_to.h>
#include <sys/types.h>
#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+#include <asm/cpufeature.h>
+#else
+#include <asm/cputable.h>
+#endif
+
#define kfpu_allowed() 1
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
-#define kfpu_end() \
- { \
- disable_kernel_vsx(); \
- disable_kernel_altivec(); \
- preempt_enable(); \
- }
#define kfpu_begin() \
{ \
preempt_disable(); \
enable_kernel_altivec(); \
enable_kernel_vsx(); \
+ enable_kernel_spe(); \
+ }
+#define kfpu_end() \
+ { \
+ disable_kernel_spe(); \
+ disable_kernel_vsx(); \
+ disable_kernel_altivec(); \
+ preempt_enable(); \
}
#else
/* seems that before 4.5 no-one bothered */
#define kfpu_begin()
#define kfpu_end() preempt_enable()
#endif
+
#define kfpu_init() 0
#define kfpu_fini() ((void) 0)
+/*
+ * Check if AltiVec instruction set is available
+ */
+static inline boolean_t
+zfs_altivec_available(void)
+{
+ return (cpu_has_feature(CPU_FTR_ALTIVEC));
+}
+
+/*
+ * Check if VSX is available
+ */
static inline boolean_t
zfs_vsx_available(void)
{
- boolean_t res;
-#if defined(__powerpc64__)
- u64 msr;
-#else
- u32 msr;
-#endif
- kfpu_begin();
- __asm volatile("mfmsr %0" : "=r"(msr));
- res = (msr & 0x800000) != 0;
- kfpu_end();
- return (res);
+ return (cpu_has_feature(CPU_FTR_VSX));
}
/*
- * Check if AltiVec instruction set is available
+ * Check if POWER ISA 2.07 is available (SHA2)
*/
static inline boolean_t
-zfs_altivec_available(void)
+zfs_isa207_available(void)
{
- boolean_t res;
- /* suggested by macallan at netbsd dot org */
-#if defined(__powerpc64__)
- u64 msr;
-#else
- u32 msr;
-#endif
- kfpu_begin();
- __asm volatile("mfmsr %0" : "=r"(msr));
- /*
- * 64 bits -> need to check bit 38
- * Power ISA Version 3.0B
- * p944
- * 32 bits -> Need to check bit 6
- * AltiVec Technology Programming Environments Manual
- * p49 (2-9)
- * They are the same, as ppc counts 'backward' ...
- */
- res = (msr & 0x2000000) != 0;
- kfpu_end();
- return (res);
+ return (cpu_has_feature(CPU_FTR_ARCH_207S));
}
+
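A sketch of the usage pattern the comment at the top of this header asks for: an algorithm mixing VSX and ISA 2.07 instructions checks every relevant feature function before entering the kfpu section.

static inline void
example_simd_section(void)
{
	if (!kfpu_allowed() || !zfs_vsx_available() || !zfs_isa207_available())
		return;		/* fall back to a scalar implementation */
	kfpu_begin();
	/* ... vectorized work ... */
	kfpu_end();
}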
#endif /* defined(__powerpc) */
#endif /* _LINUX_SIMD_POWERPC_H */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/xattr_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/xattr_compat.h
index 21c88dd07719..9b83813db708 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/xattr_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/xattr_compat.h
@@ -1,199 +1,213 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
*/
#ifndef _ZFS_XATTR_H
#define _ZFS_XATTR_H
#include <linux/posix_acl_xattr.h>
/*
* 2.6.35 API change,
* The const keyword was added to the 'struct xattr_handler' in the
* generic Linux super_block structure. To handle this we define an
* appropriate xattr_handler_t typedef which can be used. This was
* the preferred solution because it keeps the code clean and readable.
*/
typedef const struct xattr_handler xattr_handler_t;
/*
* 4.5 API change,
*/
#if defined(HAVE_XATTR_LIST_SIMPLE)
#define ZPL_XATTR_LIST_WRAPPER(fn) \
static bool \
fn(struct dentry *dentry) \
{ \
return (!!__ ## fn(dentry->d_inode, NULL, 0, NULL, 0)); \
}
/*
* 4.4 API change,
*/
#elif defined(HAVE_XATTR_LIST_DENTRY)
#define ZPL_XATTR_LIST_WRAPPER(fn) \
static size_t \
fn(struct dentry *dentry, char *list, size_t list_size, \
const char *name, size_t name_len, int type) \
{ \
return (__ ## fn(dentry->d_inode, \
list, list_size, name, name_len)); \
}
/*
* 2.6.33 API change,
*/
#elif defined(HAVE_XATTR_LIST_HANDLER)
#define ZPL_XATTR_LIST_WRAPPER(fn) \
static size_t \
fn(const struct xattr_handler *handler, struct dentry *dentry, \
char *list, size_t list_size, const char *name, size_t name_len) \
{ \
return (__ ## fn(dentry->d_inode, \
list, list_size, name, name_len)); \
}
#else
#error "Unsupported kernel"
#endif
/*
* 4.7 API change,
* The xattr_handler->get() callback was changed to take both a dentry and
* an inode, because the dentry might not be attached to an inode yet.
*/
#if defined(HAVE_XATTR_GET_DENTRY_INODE)
#define ZPL_XATTR_GET_WRAPPER(fn) \
static int \
fn(const struct xattr_handler *handler, struct dentry *dentry, \
struct inode *inode, const char *name, void *buffer, size_t size) \
{ \
return (__ ## fn(inode, name, buffer, size)); \
}
/*
* 4.4 API change,
* The xattr_handler->get() callback was changed to take an xattr_handler;
* the handler_flags argument was removed and the flags should instead be
* accessed through handler->flags.
*/
#elif defined(HAVE_XATTR_GET_HANDLER)
#define ZPL_XATTR_GET_WRAPPER(fn) \
static int \
fn(const struct xattr_handler *handler, struct dentry *dentry, \
const char *name, void *buffer, size_t size) \
{ \
return (__ ## fn(dentry->d_inode, name, buffer, size)); \
}
/*
* 2.6.33 API change,
* The xattr_handler->get() callback was changed to take a dentry
* instead of an inode, and a handler_flags argument was added.
*/
#elif defined(HAVE_XATTR_GET_DENTRY)
#define ZPL_XATTR_GET_WRAPPER(fn) \
static int \
fn(struct dentry *dentry, const char *name, void *buffer, size_t size, \
int unused_handler_flags) \
{ \
return (__ ## fn(dentry->d_inode, name, buffer, size)); \
}
+/*
+ * Android API change,
+ * The xattr_handler->get() callback was changed to take a dentry and inode
+ * and flags, because the dentry might not be attached to an inode yet.
+ */
+#elif defined(HAVE_XATTR_GET_DENTRY_INODE_FLAGS)
+#define ZPL_XATTR_GET_WRAPPER(fn) \
+static int \
+fn(const struct xattr_handler *handler, struct dentry *dentry, \
+ struct inode *inode, const char *name, void *buffer, \
+ size_t size, int flags) \
+{ \
+ return (__ ## fn(inode, name, buffer, size)); \
+}
#else
#error "Unsupported kernel"
#endif
/*
* 5.12 API change,
* The xattr_handler->set() callback was changed to take the
* struct user_namespace* as the first arg, to support idmapped
* mounts.
*/
#if defined(HAVE_XATTR_SET_USERNS)
#define ZPL_XATTR_SET_WRAPPER(fn) \
static int \
fn(const struct xattr_handler *handler, struct user_namespace *user_ns, \
struct dentry *dentry, struct inode *inode, const char *name, \
const void *buffer, size_t size, int flags) \
{ \
return (__ ## fn(inode, name, buffer, size, flags)); \
}
/*
* 4.7 API change,
* The xattr_handler->set() callback was changed to take both a dentry and
* an inode, because the dentry might not be attached to an inode yet.
*/
#elif defined(HAVE_XATTR_SET_DENTRY_INODE)
#define ZPL_XATTR_SET_WRAPPER(fn) \
static int \
fn(const struct xattr_handler *handler, struct dentry *dentry, \
struct inode *inode, const char *name, const void *buffer, \
size_t size, int flags) \
{ \
return (__ ## fn(inode, name, buffer, size, flags)); \
}
/*
* 4.4 API change,
* The xattr_handler->set() callback was changed to take an xattr_handler;
* the handler_flags argument was removed and the flags should instead be
* accessed through handler->flags.
*/
#elif defined(HAVE_XATTR_SET_HANDLER)
#define ZPL_XATTR_SET_WRAPPER(fn) \
static int \
fn(const struct xattr_handler *handler, struct dentry *dentry, \
const char *name, const void *buffer, size_t size, int flags) \
{ \
return (__ ## fn(dentry->d_inode, name, buffer, size, flags)); \
}
/*
* 2.6.33 API change,
* The xattr_handler->set() callback was changed to take a dentry
* instead of an inode, and a handler_flags argument was added.
*/
#elif defined(HAVE_XATTR_SET_DENTRY)
#define ZPL_XATTR_SET_WRAPPER(fn) \
static int \
fn(struct dentry *dentry, const char *name, const void *buffer, \
size_t size, int flags, int unused_handler_flags) \
{ \
return (__ ## fn(dentry->d_inode, name, buffer, size, flags)); \
}
#else
#error "Unsupported kernel"
#endif
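A sketch of how these wrappers are consumed (the function names are illustrative, following the pattern used by zpl_xattr.c): the filesystem writes one kernel-version-independent __fn() body and the macro emits whichever prototype the running kernel's xattr_handler expects.

static int
__zpl_xattr_example_get(struct inode *ip, const char *name,
    void *value, size_t size)
{
	/* ... look the attribute up on ip ... */
	return (-ENOENT);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_example_get);

static int
__zpl_xattr_example_set(struct inode *ip, const char *name,
    const void *value, size_t size, int flags)
{
	/* ... store the attribute on ip ... */
	return (0);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_example_set);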
/*
* Linux 3.7 API change. posix_acl_{from,to}_xattr gained the user_ns
* parameter. All callers are expected to pass the &init_user_ns which
* is available through the init credential (kcred).
*/
static inline struct posix_acl *
zpl_acl_from_xattr(const void *value, int size)
{
return (posix_acl_from_xattr(kcred->user_ns, value, size));
}
static inline int
zpl_acl_to_xattr(struct posix_acl *acl, void *value, int size)
{
return (posix_acl_to_xattr(kcred->user_ns, acl, value, size));
}
#endif /* _ZFS_XATTR_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/disp.h b/sys/contrib/openzfs/include/os/linux/spl/sys/disp.h
index e106d3c5438e..c8be6ffbf10f 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/disp.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/disp.h
@@ -1,33 +1,35 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_DISP_H
#define _SPL_DISP_H
#include <linux/preempt.h>
-#define kpreempt(unused) schedule()
+#define KPREEMPT_SYNC (-1)
+
+#define kpreempt(unused) cond_resched()
#define kpreempt_disable() preempt_disable()
#define kpreempt_enable() preempt_enable()
#endif /* SPL_DISP_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/vmsystm.h b/sys/contrib/openzfs/include/os/linux/spl/sys/vmsystm.h
index b3f121ecf0ca..c6d99fb3183d 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/vmsystm.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/vmsystm.h
@@ -1,91 +1,94 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_VMSYSTM_H
#define _SPL_VMSYSTM_H
#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <sys/types.h>
#include <asm/uaccess.h>
#ifdef HAVE_TOTALRAM_PAGES_FUNC
#define zfs_totalram_pages totalram_pages()
#else
#define zfs_totalram_pages totalram_pages
#endif
#ifdef HAVE_TOTALHIGH_PAGES
#define zfs_totalhigh_pages totalhigh_pages()
#else
#define zfs_totalhigh_pages totalhigh_pages
#endif
+#define membar_consumer() smp_rmb()
#define membar_producer() smp_wmb()
+#define membar_sync() smp_mb()
+
#define physmem zfs_totalram_pages
#define xcopyin(from, to, size) copy_from_user(to, from, size)
#define xcopyout(from, to, size) copy_to_user(to, from, size)
static __inline__ int
copyin(const void *from, void *to, size_t len)
{
/* On error copyin routine returns -1 */
if (xcopyin(from, to, len))
return (-1);
return (0);
}
static __inline__ int
copyout(const void *from, void *to, size_t len)
{
/* On error copyout routine returns -1 */
if (xcopyout(from, to, len))
return (-1);
return (0);
}
static __inline__ int
copyinstr(const void *from, void *to, size_t len, size_t *done)
{
size_t rc;
if (len == 0)
return (-ENAMETOOLONG);
/* XXX: Should return ENAMETOOLONG if 'strlen(from) > len' */
memset(to, 0, len);
rc = copyin(from, to, len - 1);
if (done != NULL)
*done = rc;
return (0);
}
#endif /* SPL_VMSYSTM_H */
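A short sketch of the error convention of the copy routines above: they return -1 on fault rather than an errno, so callers translate the result themselves (the EFAULT mapping here is illustrative).

static inline int
example_fetch_arg(const void *uaddr, uint64_t *kval)
{
	if (copyin(uaddr, kval, sizeof (*kval)) != 0)
		return (EFAULT);
	return (0);
}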
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h
index 697ae2018ec4..e320b8de4222 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h
@@ -1,259 +1,256 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VFSOPS_H
#define _SYS_FS_ZFS_VFSOPS_H
#include <sys/dataset_kstats.h>
#include <sys/isa_defs.h>
#include <sys/types32.h>
#include <sys/list.h>
#include <sys/vfs.h>
#include <sys/zil.h>
#include <sys/sa.h>
#include <sys/rrwlock.h>
#include <sys/dsl_dataset.h>
#include <sys/zfs_ioctl.h>
#include <sys/objlist.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct zfsvfs zfsvfs_t;
struct znode;
/*
* This structure emulates the vfs_t from other platforms. Its purpose
* is to facilitate the handling of mount options and minimize structural
* differences between the platforms.
*/
typedef struct vfs {
struct zfsvfs *vfs_data;
char *vfs_mntpoint; /* Primary mount point */
uint64_t vfs_xattr;
boolean_t vfs_readonly;
boolean_t vfs_do_readonly;
boolean_t vfs_setuid;
boolean_t vfs_do_setuid;
boolean_t vfs_exec;
boolean_t vfs_do_exec;
boolean_t vfs_devices;
boolean_t vfs_do_devices;
boolean_t vfs_do_xattr;
boolean_t vfs_atime;
boolean_t vfs_do_atime;
boolean_t vfs_relatime;
boolean_t vfs_do_relatime;
boolean_t vfs_nbmand;
boolean_t vfs_do_nbmand;
} vfs_t;
typedef struct zfs_mnt {
const char *mnt_osname; /* Objset name */
char *mnt_data; /* Raw mount options */
} zfs_mnt_t;
struct zfsvfs {
vfs_t *z_vfs; /* generic fs struct */
struct super_block *z_sb; /* generic super_block */
struct zfsvfs *z_parent; /* parent fs */
objset_t *z_os; /* objset reference */
uint64_t z_flags; /* super_block flags */
uint64_t z_root; /* id of root znode */
uint64_t z_unlinkedobj; /* id of unlinked zapobj */
uint64_t z_max_blksz; /* maximum block size for files */
uint64_t z_fuid_obj; /* fuid table object number */
uint64_t z_fuid_size; /* fuid table size */
avl_tree_t z_fuid_idx; /* fuid tree keyed by index */
avl_tree_t z_fuid_domain; /* fuid tree keyed by domain */
krwlock_t z_fuid_lock; /* fuid lock */
boolean_t z_fuid_loaded; /* fuid tables are loaded */
boolean_t z_fuid_dirty; /* need to sync fuid table ? */
struct zfs_fuid_info *z_fuid_replay; /* fuid info for replay */
zilog_t *z_log; /* intent log pointer */
uint_t z_acl_mode; /* acl chmod/mode behavior */
uint_t z_acl_inherit; /* acl inheritance behavior */
uint_t z_acl_type; /* type of ACL usable on this FS */
zfs_case_t z_case; /* case-sense */
boolean_t z_utf8; /* utf8-only */
int z_norm; /* normalization flags */
boolean_t z_relatime; /* enable relatime mount option */
boolean_t z_unmounted; /* unmounted */
rrmlock_t z_teardown_lock;
krwlock_t z_teardown_inactive_lock;
list_t z_all_znodes; /* all znodes in the fs */
uint64_t z_nr_znodes; /* number of znodes in the fs */
unsigned long z_rollback_time; /* last online rollback time */
unsigned long z_snap_defer_time; /* last snapshot unmount deferral */
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
arc_prune_t *z_arc_prune; /* called by ARC to prune caches */
struct inode *z_ctldir; /* .zfs directory inode */
boolean_t z_show_ctldir; /* expose .zfs in the root dir */
boolean_t z_issnap; /* true if this is a snapshot */
boolean_t z_use_fuids; /* version allows fuids */
boolean_t z_replay; /* set during ZIL replay */
boolean_t z_use_sa; /* version allows system attributes */
boolean_t z_xattr_sa; /* allow xattrs to be stored as SA */
boolean_t z_draining; /* is true when drain is active */
boolean_t z_drain_cancel; /* signal the unlinked drain to stop */
uint64_t z_version; /* ZPL version */
uint64_t z_shares_dir; /* hidden shares dir */
dataset_kstats_t z_kstat; /* fs kstats */
kmutex_t z_lock;
uint64_t z_userquota_obj;
uint64_t z_groupquota_obj;
uint64_t z_userobjquota_obj;
uint64_t z_groupobjquota_obj;
uint64_t z_projectquota_obj;
uint64_t z_projectobjquota_obj;
uint64_t z_replay_eof; /* New end of file - replay only */
sa_attr_type_t *z_attr_table; /* SA attr mapping->id */
uint64_t z_hold_size; /* znode hold array size */
avl_tree_t *z_hold_trees; /* znode hold trees */
kmutex_t *z_hold_locks; /* znode hold locks */
taskqid_t z_drain_task; /* task id for the unlink drain task */
};
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
-#define ZFS_TEARDOWN_TRY_ENTER_READ(zfsvfs) \
- rw_tryenter(&(zfsvfs)->z_teardown_lock, RW_READER)
-
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
/*
* Allow a maximum number of links. While ZFS does not internally limit
* this, the inode->i_nlink member is defined as an unsigned int. To be
* safe we use 2^31-1 as the limit.
*/
#define ZFS_LINK_MAX ((1U << 31) - 1U)
/*
* Normal filesystems (those not under .zfs/snapshot) have a total
* file ID size limited to 12 bytes (including the length field) due to
* NFSv2 protocol's limitation of 32 bytes for a filehandle. For historical
* reasons, this same limit is being imposed by the Solaris NFSv3 implementation
* (although the NFSv3 protocol actually permits a maximum of 64 bytes). It
* is not possible to expand beyond 12 bytes without abandoning support
* of NFSv2.
*
* For normal filesystems, we partition up the available space as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
*
* We reserve only 48 bits for the object number, as this is the limit
* currently defined and imposed by the DMU.
*/
typedef struct zfid_short {
uint16_t zf_len;
uint8_t zf_object[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_gen[4]; /* gen[i] = gen >> (8 * i) */
} zfid_short_t;
/*
* Filesystems under .zfs/snapshot have a total file ID size of 22 bytes
* (including the length field). This makes files under .zfs/snapshot
* accessible by NFSv3 and NFSv4, but not NFSv2.
*
* For files under .zfs/snapshot, we partition up the available space
* as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
* 6 bytes objset id (48 bits)
* 4 bytes currently just zero (32 bits)
*
* We reserve only 48 bits for the object number and objset id, as these are
* the limits currently defined and imposed by the DMU.
*/
typedef struct zfid_long {
zfid_short_t z_fid;
uint8_t zf_setid[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_setgen[4]; /* gen[i] = gen >> (8 * i) */
} zfid_long_t;
#define SHORT_FID_LEN (sizeof (zfid_short_t) - sizeof (uint16_t))
#define LONG_FID_LEN (sizeof (zfid_long_t) - sizeof (uint16_t))
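/*
 * A minimal sketch of how an object number and generation are packed into
 * a zfid_short_t, following the per-byte comments above (obj[i] = obj >>
 * (8 * i)). The helper name is hypothetical; it is not part of this header.
 */
static inline void
zfid_short_pack_sketch(zfid_short_t *zfid, uint64_t object, uint32_t gen)
{
	int i;

	zfid->zf_len = SHORT_FID_LEN;
	for (i = 0; i < (int)sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
	for (i = 0; i < (int)sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
}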
extern void zfs_init(void);
extern void zfs_fini(void);
extern int zfs_suspend_fs(zfsvfs_t *zfsvfs);
extern int zfs_resume_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_end_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern void zfs_exit_fs(zfsvfs_t *zfsvfs);
extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
extern int zfsvfs_create(const char *name, boolean_t readonly, zfsvfs_t **zfvp);
extern int zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os);
extern void zfsvfs_free(zfsvfs_t *zfsvfs);
extern int zfs_check_global_label(const char *dsname, const char *hexsl);
extern boolean_t zfs_is_readonly(zfsvfs_t *zfsvfs);
extern int zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent);
extern void zfs_preumount(struct super_block *sb);
extern int zfs_umount(struct super_block *sb);
extern int zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm);
extern int zfs_statvfs(struct inode *ip, struct kstatfs *statp);
extern int zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp);
extern int zfs_prune(struct super_block *sb, unsigned long nr_to_scan,
int *objects);
extern int zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop,
uint64_t *val, char *setpoint);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_VFSOPS_H */
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h
index a6fa06a3f1a8..52568781011f 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h
@@ -1,182 +1,184 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_ZFS_ZNODE_IMPL_H
#define _SYS_ZFS_ZNODE_IMPL_H
#ifndef _KERNEL
#error "no user serviceable parts within"
#endif
#include <sys/isa_defs.h>
#include <sys/types32.h>
#include <sys/list.h>
#include <sys/dmu.h>
#include <sys/sa.h>
#include <sys/time.h>
#include <sys/zfs_vfsops.h>
#include <sys/rrwlock.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include <sys/zfs_rlock.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ZNODE_OS_FIELDS \
inode_timespec_t z_btime; /* creation/birth time (cached) */ \
struct inode z_inode;
/*
* Convert between znode pointers and inode pointers
*/
#define ZTOI(znode) (&((znode)->z_inode))
#define ITOZ(inode) (container_of((inode), znode_t, z_inode))
#define ZTOZSB(znode) ((zfsvfs_t *)(ZTOI(znode)->i_sb->s_fs_info))
#define ITOZSB(inode) ((zfsvfs_t *)((inode)->i_sb->s_fs_info))
#define ZTOTYPE(zp) (ZTOI(zp)->i_mode)
#define ZTOGID(zp) (ZTOI(zp)->i_gid)
#define ZTOUID(zp) (ZTOI(zp)->i_uid)
#define ZTONLNK(zp) (ZTOI(zp)->i_nlink)
#define Z_ISBLK(type) S_ISBLK(type)
#define Z_ISCHR(type) S_ISCHR(type)
#define Z_ISLNK(type) S_ISLNK(type)
#define Z_ISDEV(type) (S_ISCHR(type) || S_ISBLK(type) || S_ISFIFO(type))
#define Z_ISDIR(type) S_ISDIR(type)
#define zn_has_cached_data(zp) ((zp)->z_is_mapped)
#define zn_flush_cached_data(zp, sync) write_inode_now(ZTOI(zp), sync)
#define zn_rlimit_fsize(zp, uio) (0)
/*
* zhold() wraps igrab() on Linux, and igrab() may fail when the
* inode is in the process of being deleted. As zhold() must only be
* called when a ref already exists - so the inode cannot be
* mid-deletion - we VERIFY() this.
*/
#define zhold(zp) VERIFY3P(igrab(ZTOI((zp))), !=, NULL)
#define zrele(zp) iput(ZTOI((zp)))
/* Called on entry to each ZFS inode and vfs operation. */
-#define ZFS_ENTER_ERROR(zfsvfs, error) \
-do { \
- ZFS_TEARDOWN_ENTER_READ(zfsvfs, FTAG); \
- if (unlikely((zfsvfs)->z_unmounted)) { \
- ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG); \
- return (error); \
- } \
-} while (0)
-#define ZFS_ENTER(zfsvfs) ZFS_ENTER_ERROR(zfsvfs, EIO)
-#define ZPL_ENTER(zfsvfs) ZFS_ENTER_ERROR(zfsvfs, -EIO)
+static inline int
+zfs_enter(zfsvfs_t *zfsvfs, const char *tag)
+{
+ ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag);
+ if (unlikely(zfsvfs->z_unmounted)) {
+ ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag);
+ return (SET_ERROR(EIO));
+ }
+ return (0);
+}
/* Must be called before exiting the operation. */
-#define ZFS_EXIT(zfsvfs) \
-do { \
- zfs_exit_fs(zfsvfs); \
- ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG); \
-} while (0)
+static inline void
+zfs_exit(zfsvfs_t *zfsvfs, const char *tag)
+{
+ zfs_exit_fs(zfsvfs);
+ ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag);
+}
-#define ZPL_EXIT(zfsvfs) \
-do { \
- rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); \
-} while (0)
+static inline int
+zpl_enter(zfsvfs_t *zfsvfs, const char *tag)
+{
+ return (-zfs_enter(zfsvfs, tag));
+}
-/* Verifies the znode is valid. */
-#define ZFS_VERIFY_ZP_ERROR(zp, error) \
-do { \
- if (unlikely((zp)->z_sa_hdl == NULL)) { \
- ZFS_EXIT(ZTOZSB(zp)); \
- return (error); \
- } \
-} while (0)
-#define ZFS_VERIFY_ZP(zp) ZFS_VERIFY_ZP_ERROR(zp, EIO)
-#define ZPL_VERIFY_ZP(zp) ZFS_VERIFY_ZP_ERROR(zp, -EIO)
+static inline void
+zpl_exit(zfsvfs_t *zfsvfs, const char *tag)
+{
+ ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag);
+}
+
+/* zfs_verify_zp and zfs_enter_verify_zp are defined in zfs_znode.h */
+#define zpl_verify_zp(zp) (-zfs_verify_zp(zp))
+#define zpl_enter_verify_zp(zfsvfs, zp, tag) \
+ (-zfs_enter_verify_zp(zfsvfs, zp, tag))
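/*
 * A minimal sketch of the calling convention for the zfs_enter()/zfs_exit()
 * helpers above, as a VFS-level operation might use them. FTAG is the usual
 * hold tag from the SPL headers; the function name is hypothetical.
 */
static inline int
zfs_vfs_op_sketch(zfsvfs_t *zfsvfs)
{
	int error;

	if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
		return (error);	/* filesystem already unmounted */
	/* ... do the work while the teardown lock is held for read ... */
	zfs_exit(zfsvfs, FTAG);
	return (0);
}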
/*
* Macros for dealing with dmu_buf_hold
*/
#define ZFS_OBJ_MTX_SZ 64
#define ZFS_OBJ_MTX_MAX (1024 * 1024)
#define ZFS_OBJ_HASH(zfsvfs, obj) ((obj) & ((zfsvfs->z_hold_size) - 1))
extern unsigned int zfs_object_mutex_size;
/*
* Encode ZFS stored time values from a struct timespec / struct timespec64.
*/
#define ZFS_TIME_ENCODE(tp, stmp) \
do { \
(stmp)[0] = (uint64_t)(tp)->tv_sec; \
(stmp)[1] = (uint64_t)(tp)->tv_nsec; \
} while (0)
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
/*
* Decode ZFS stored time values to a struct timespec64
* 4.18 and newer kernels.
*/
#define ZFS_TIME_DECODE(tp, stmp) \
do { \
(tp)->tv_sec = (time64_t)(stmp)[0]; \
(tp)->tv_nsec = (long)(stmp)[1]; \
} while (0)
#else
/*
* Decode ZFS stored time values to a struct timespec
* 4.17 and older kernels.
*/
#define ZFS_TIME_DECODE(tp, stmp) \
do { \
(tp)->tv_sec = (time_t)(stmp)[0]; \
(tp)->tv_nsec = (long)(stmp)[1]; \
} while (0)
#endif /* HAVE_INODE_TIMESPEC64_TIMES */
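/*
 * A minimal sketch of round-tripping an inode timestamp through the two
 * uint64_t on-disk words used by ZFS_TIME_ENCODE()/ZFS_TIME_DECODE() above.
 * The helper name is hypothetical.
 */
static inline void
zfs_time_roundtrip_sketch(inode_timespec_t *tp)
{
	uint64_t stmp[2];

	ZFS_TIME_ENCODE(tp, stmp);	/* stmp[0] = seconds, stmp[1] = nanoseconds */
	ZFS_TIME_DECODE(tp, stmp);	/* recovers the original timespec */
}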
#define ZFS_ACCESSTIME_STAMP(zfsvfs, zp)
struct znode;
extern int zfs_sync(struct super_block *, int, cred_t *);
extern int zfs_inode_alloc(struct super_block *, struct inode **ip);
extern void zfs_inode_destroy(struct inode *);
extern void zfs_mark_inode_dirty(struct inode *);
extern boolean_t zfs_relatime_need_update(const struct inode *);
#if defined(HAVE_UIO_RW)
extern caddr_t zfs_map_page(page_t *, enum seg_rw);
extern void zfs_unmap_page(page_t *, caddr_t);
#endif /* HAVE_UIO_RW */
extern zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE];
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZFS_ZNODE_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/blake3.h b/sys/contrib/openzfs/include/sys/blake3.h
index 19500585f382..ad65fc8db7b9 100644
--- a/sys/contrib/openzfs/include/sys/blake3.h
+++ b/sys/contrib/openzfs/include/sys/blake3.h
@@ -1,125 +1,122 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
* Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
* Copyright (c) 2021 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#ifndef BLAKE3_H
#define BLAKE3_H
#ifdef _KERNEL
#include <sys/types.h>
#else
#include <stdint.h>
#include <stdlib.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define BLAKE3_KEY_LEN 32
#define BLAKE3_OUT_LEN 32
#define BLAKE3_MAX_DEPTH 54
#define BLAKE3_BLOCK_LEN 64
#define BLAKE3_CHUNK_LEN 1024
/*
* This struct is a private implementation detail.
* It has to be here because it's part of BLAKE3_CTX below.
*/
typedef struct {
uint32_t cv[8];
uint64_t chunk_counter;
uint8_t buf[BLAKE3_BLOCK_LEN];
uint8_t buf_len;
uint8_t blocks_compressed;
uint8_t flags;
} blake3_chunk_state_t;
typedef struct {
uint32_t key[8];
blake3_chunk_state_t chunk;
uint8_t cv_stack_len;
/*
* The stack size is MAX_DEPTH + 1 because we do lazy merging. For
* example, with 7 chunks, we have 3 entries in the stack. Adding an
* 8th chunk requires a 4th entry, rather than merging everything down
* to 1, because we don't know whether more input is coming. This is
* different from how the reference implementation does things.
*/
uint8_t cv_stack[(BLAKE3_MAX_DEPTH + 1) * BLAKE3_OUT_LEN];
- /* const blake3_impl_ops_t *ops */
+ /* const blake3_ops_t *ops */
const void *ops;
} BLAKE3_CTX;
/* init the context for hash operation */
void Blake3_Init(BLAKE3_CTX *ctx);
/* init the context for a MAC and/or tree hash operation */
void Blake3_InitKeyed(BLAKE3_CTX *ctx, const uint8_t key[BLAKE3_KEY_LEN]);
/* process the input bytes */
void Blake3_Update(BLAKE3_CTX *ctx, const void *input, size_t input_len);
/* finalize the hash computation and output the result */
void Blake3_Final(const BLAKE3_CTX *ctx, uint8_t *out);
/* finalize the hash computation and output the result */
void Blake3_FinalSeek(const BLAKE3_CTX *ctx, uint64_t seek, uint8_t *out,
size_t out_len);
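/*
 * A minimal sketch of the one-shot Init/Update/Final sequence declared
 * above, hashing a single buffer into BLAKE3_OUT_LEN bytes. The helper
 * name is hypothetical.
 */
static inline void
blake3_hash_sketch(const void *buf, size_t len, uint8_t out[BLAKE3_OUT_LEN])
{
	BLAKE3_CTX ctx;

	Blake3_Init(&ctx);		/* unkeyed hash */
	Blake3_Update(&ctx, buf, len);	/* may be called repeatedly */
	Blake3_Final(&ctx, out);	/* writes BLAKE3_OUT_LEN bytes */
}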
/* these are pre-allocated contexts */
extern void **blake3_per_cpu_ctx;
extern void blake3_per_cpu_ctx_init(void);
extern void blake3_per_cpu_ctx_fini(void);
-/* return number of supported implementations */
-extern int blake3_get_impl_count(void);
+/* get count of supported implementations */
+extern uint32_t blake3_impl_getcnt(void);
-/* return id of selected implementation */
-extern int blake3_get_impl_id(void);
+/* get id of selected implementation */
+extern uint32_t blake3_impl_getid(void);
-/* return name of selected implementation */
-extern const char *blake3_get_impl_name(void);
+/* get name of selected implementation */
+extern const char *blake3_impl_getname(void);
/* setup id as fastest implementation */
-extern void blake3_set_impl_fastest(uint32_t id);
+extern void blake3_impl_set_fastest(uint32_t id);
/* set implementation by id */
-extern void blake3_set_impl_id(uint32_t id);
+extern void blake3_impl_setid(uint32_t id);
/* set implementation by name */
-extern int blake3_set_impl_name(const char *name);
-
-/* set startup implementation */
-extern void blake3_setup_impl(void);
+extern int blake3_impl_setname(const char *name);
#ifdef __cplusplus
}
#endif
#endif /* BLAKE3_H */
diff --git a/sys/contrib/openzfs/include/sys/bqueue.h b/sys/contrib/openzfs/include/sys/bqueue.h
index 797aecd791a3..b9621966027a 100644
--- a/sys/contrib/openzfs/include/sys/bqueue.h
+++ b/sys/contrib/openzfs/include/sys/bqueue.h
@@ -1,56 +1,56 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2018 by Delphix. All rights reserved.
*/
#ifndef _BQUEUE_H
#define _BQUEUE_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
typedef struct bqueue {
list_t bq_list;
kmutex_t bq_lock;
kcondvar_t bq_add_cv;
kcondvar_t bq_pop_cv;
- uint64_t bq_size;
- uint64_t bq_maxsize;
- uint64_t bq_fill_fraction;
+ size_t bq_size;
+ size_t bq_maxsize;
+ uint_t bq_fill_fraction;
size_t bq_node_offset;
} bqueue_t;
typedef struct bqueue_node {
list_node_t bqn_node;
- uint64_t bqn_size;
+ size_t bqn_size;
} bqueue_node_t;
-int bqueue_init(bqueue_t *, uint64_t, uint64_t, size_t);
+int bqueue_init(bqueue_t *, uint_t, size_t, size_t);
void bqueue_destroy(bqueue_t *);
-void bqueue_enqueue(bqueue_t *, void *, uint64_t);
-void bqueue_enqueue_flush(bqueue_t *, void *, uint64_t);
+void bqueue_enqueue(bqueue_t *, void *, size_t);
+void bqueue_enqueue_flush(bqueue_t *, void *, size_t);
void *bqueue_dequeue(bqueue_t *);
boolean_t bqueue_empty(bqueue_t *);
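/*
 * A minimal sketch of the bqueue API with the updated types: the element
 * embeds a bqueue_node_t, and the queue is initialized with (assuming the
 * existing argument order) the fill fraction, the maximum queued size in
 * bytes, and the offset of the embedded node. All names are hypothetical.
 */
typedef struct bq_item_sketch {
	bqueue_node_t	bis_node;
	uint64_t	bis_payload;
} bq_item_sketch_t;

static inline void
bqueue_usage_sketch(void)
{
	bqueue_t q;
	bq_item_sketch_t *item;

	VERIFY0(bqueue_init(&q, 4, 1024 * 1024,
	    offsetof(bq_item_sketch_t, bis_node)));
	item = kmem_zalloc(sizeof (*item), KM_SLEEP);
	bqueue_enqueue(&q, item, sizeof (*item));
	item = bqueue_dequeue(&q);	/* blocks until an entry is available */
	kmem_free(item, sizeof (*item));
	bqueue_destroy(&q);
}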
#ifdef __cplusplus
}
#endif
#endif /* _BQUEUE_H */
diff --git a/sys/contrib/openzfs/include/sys/dmu.h b/sys/contrib/openzfs/include/sys/dmu.h
index 5a3d7d6a5055..0a4827e5ec3e 100644
--- a/sys/contrib/openzfs/include/sys/dmu.h
+++ b/sys/contrib/openzfs/include/sys/dmu.h
@@ -1,1079 +1,1079 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_DMU_H
#define _SYS_DMU_H
/*
* This file describes the interface that the DMU provides for its
* consumers.
*
* The DMU also interacts with the SPA. That interface is described in
* dmu_spa.h.
*/
#include <sys/zfs_context.h>
#include <sys/inttypes.h>
#include <sys/cred.h>
#include <sys/fs/zfs.h>
#include <sys/zio_compress.h>
#include <sys/zio_priority.h>
#include <sys/uio.h>
#include <sys/zfs_file.h>
#ifdef __cplusplus
extern "C" {
#endif
struct page;
struct vnode;
struct spa;
struct zilog;
struct zio;
struct blkptr;
struct zap_cursor;
struct dsl_dataset;
struct dsl_pool;
struct dnode;
struct drr_begin;
struct drr_end;
struct zbookmark_phys;
struct spa;
struct nvlist;
struct arc_buf;
struct zio_prop;
struct sa_handle;
struct dsl_crypto_params;
struct locked_range;
typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;
typedef struct dnode dnode_t;
typedef enum dmu_object_byteswap {
DMU_BSWAP_UINT8,
DMU_BSWAP_UINT16,
DMU_BSWAP_UINT32,
DMU_BSWAP_UINT64,
DMU_BSWAP_ZAP,
DMU_BSWAP_DNODE,
DMU_BSWAP_OBJSET,
DMU_BSWAP_ZNODE,
DMU_BSWAP_OLDACL,
DMU_BSWAP_ACL,
/*
* Allocating a new byteswap type number makes the on-disk format
* incompatible with any other format that uses the same number.
*
* Data can usually be structured to work with one of the
* DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
*/
DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;
#define DMU_OT_NEWTYPE 0x80
#define DMU_OT_METADATA 0x40
#define DMU_OT_ENCRYPTED 0x20
#define DMU_OT_BYTESWAP_MASK 0x1f
/*
* Defines a uint8_t object type. Object types specify if the data
* in the object is metadata (boolean) and how to byteswap the data
* (dmu_object_byteswap_t). All of the types created by this method
* are cached in the dbuf metadata cache.
*/
#define DMU_OT(byteswap, metadata, encrypted) \
(DMU_OT_NEWTYPE | \
((metadata) ? DMU_OT_METADATA : 0) | \
((encrypted) ? DMU_OT_ENCRYPTED : 0) | \
((byteswap) & DMU_OT_BYTESWAP_MASK))
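/*
 * Worked example: DMU_OT() simply ORs the flag bits with the byteswap
 * index, so DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_FALSE) expands to
 * DMU_OT_NEWTYPE | DMU_OT_METADATA | DMU_BSWAP_UINT64 ==
 * 0x80 | 0x40 | 0x03 == 0xc3, which is DMU_OTN_UINT64_METADATA below.
 */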
#define DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
(ot) < DMU_OT_NUMTYPES)
#define DMU_OT_IS_METADATA_CACHED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
B_TRUE : dmu_ot[(ot)].ot_dbuf_metadata_cache)
/*
* MDB doesn't have dmu_ot; it defines these macros itself.
*/
#ifndef ZFS_MDB
#define DMU_OT_IS_METADATA_IMPL(ot) (dmu_ot[ot].ot_metadata)
#define DMU_OT_IS_ENCRYPTED_IMPL(ot) (dmu_ot[ot].ot_encrypt)
#define DMU_OT_BYTESWAP_IMPL(ot) (dmu_ot[ot].ot_byteswap)
#endif
#define DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
- ((ot) & DMU_OT_METADATA) : \
+ (((ot) & DMU_OT_METADATA) != 0) : \
DMU_OT_IS_METADATA_IMPL(ot))
#define DMU_OT_IS_DDT(ot) \
((ot) == DMU_OT_DDT_ZAP)
/* Note: ztest uses DMU_OT_UINT64_OTHER as a proxy for file blocks */
#define DMU_OT_IS_FILE(ot) \
((ot) == DMU_OT_PLAIN_FILE_CONTENTS || (ot) == DMU_OT_UINT64_OTHER)
#define DMU_OT_IS_ENCRYPTED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
- ((ot) & DMU_OT_ENCRYPTED) : \
+ (((ot) & DMU_OT_ENCRYPTED) != 0) : \
DMU_OT_IS_ENCRYPTED_IMPL(ot))
/*
* These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
* have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
* is repurposed for embedded BPs.
*/
#define DMU_OT_HAS_FILL(ot) \
((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)
#define DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_BYTESWAP_MASK) : \
DMU_OT_BYTESWAP_IMPL(ot))
typedef enum dmu_object_type {
DMU_OT_NONE,
/* general: */
DMU_OT_OBJECT_DIRECTORY, /* ZAP */
DMU_OT_OBJECT_ARRAY, /* UINT64 */
DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */
DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */
DMU_OT_BPOBJ, /* UINT64 */
DMU_OT_BPOBJ_HDR, /* UINT64 */
/* spa: */
DMU_OT_SPACE_MAP_HEADER, /* UINT64 */
DMU_OT_SPACE_MAP, /* UINT64 */
/* zil: */
DMU_OT_INTENT_LOG, /* UINT64 */
/* dmu: */
DMU_OT_DNODE, /* DNODE */
DMU_OT_OBJSET, /* OBJSET */
/* dsl: */
DMU_OT_DSL_DIR, /* UINT64 */
DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */
DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */
DMU_OT_DSL_PROPS, /* ZAP */
DMU_OT_DSL_DATASET, /* UINT64 */
/* zpl: */
DMU_OT_ZNODE, /* ZNODE */
DMU_OT_OLDACL, /* Old ACL */
DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */
DMU_OT_DIRECTORY_CONTENTS, /* ZAP */
DMU_OT_MASTER_NODE, /* ZAP */
DMU_OT_UNLINKED_SET, /* ZAP */
/* zvol: */
DMU_OT_ZVOL, /* UINT8 */
DMU_OT_ZVOL_PROP, /* ZAP */
/* other; for testing only! */
DMU_OT_PLAIN_OTHER, /* UINT8 */
DMU_OT_UINT64_OTHER, /* UINT64 */
DMU_OT_ZAP_OTHER, /* ZAP */
/* new object types: */
DMU_OT_ERROR_LOG, /* ZAP */
DMU_OT_SPA_HISTORY, /* UINT8 */
DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */
DMU_OT_POOL_PROPS, /* ZAP */
DMU_OT_DSL_PERMS, /* ZAP */
DMU_OT_ACL, /* ACL */
DMU_OT_SYSACL, /* SYSACL */
DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */
DMU_OT_FUID_SIZE, /* FUID table size UINT64 */
DMU_OT_NEXT_CLONES, /* ZAP */
DMU_OT_SCAN_QUEUE, /* ZAP */
DMU_OT_USERGROUP_USED, /* ZAP */
DMU_OT_USERGROUP_QUOTA, /* ZAP */
DMU_OT_USERREFS, /* ZAP */
DMU_OT_DDT_ZAP, /* ZAP */
DMU_OT_DDT_STATS, /* ZAP */
DMU_OT_SA, /* System attr */
DMU_OT_SA_MASTER_NODE, /* ZAP */
DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */
DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */
DMU_OT_SCAN_XLATE, /* ZAP */
DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */
DMU_OT_DEADLIST, /* ZAP */
DMU_OT_DEADLIST_HDR, /* UINT64 */
DMU_OT_DSL_CLONES, /* ZAP */
DMU_OT_BPOBJ_SUBOBJ, /* UINT64 */
/*
* Do not allocate new object types here. Doing so makes the on-disk
* format incompatible with any other format that uses the same object
* type number.
*
* When creating an object which does not have one of the above types
* use the DMU_OTN_* type with the correct byteswap and metadata
* values.
*
* The DMU_OTN_* types do not have entries in the dmu_ot table,
* use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
* of indexing into dmu_ot directly (this works for both DMU_OT_* types
* and DMU_OTN_* types).
*/
DMU_OT_NUMTYPES,
/*
* Names for valid types declared with DMU_OT().
*/
DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_FALSE),
DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_FALSE),
DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_FALSE),
DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_FALSE),
DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_FALSE),
DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_FALSE),
DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_FALSE),
DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_FALSE),
DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_FALSE),
DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_FALSE),
DMU_OTN_UINT8_ENC_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_TRUE),
DMU_OTN_UINT8_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_TRUE),
DMU_OTN_UINT16_ENC_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_TRUE),
DMU_OTN_UINT16_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_TRUE),
DMU_OTN_UINT32_ENC_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_TRUE),
DMU_OTN_UINT32_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_TRUE),
DMU_OTN_UINT64_ENC_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_TRUE),
DMU_OTN_UINT64_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_TRUE),
DMU_OTN_ZAP_ENC_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_TRUE),
DMU_OTN_ZAP_ENC_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_TRUE),
} dmu_object_type_t;
/*
* These flags are intended to be used to specify the "txg_how"
* parameter when calling the dmu_tx_assign() function. See the comment
* above dmu_tx_assign() for more details on the meaning of these flags.
*/
#define TXG_NOWAIT (0ULL)
#define TXG_WAIT (1ULL<<0)
#define TXG_NOTHROTTLE (1ULL<<1)
void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);
#define DS_FIND_SNAPSHOTS (1<<0)
#define DS_FIND_CHILDREN (1<<1)
#define DS_FIND_SERIALIZE (1<<2)
/*
* The maximum number of bytes that can be accessed as part of one
* operation, including metadata.
*/
#define DMU_MAX_ACCESS (64 * 1024 * 1024) /* 64MB */
#define DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */
#define DMU_USERUSED_OBJECT (-1ULL)
#define DMU_GROUPUSED_OBJECT (-2ULL)
#define DMU_PROJECTUSED_OBJECT (-3ULL)
/*
* Zap prefix for object accounting in DMU_{USER,GROUP,PROJECT}USED_OBJECT.
*/
#define DMU_OBJACCT_PREFIX "obj-"
#define DMU_OBJACCT_PREFIX_LEN 4
/*
* artificial blkids for bonus buffer and spill blocks
*/
#define DMU_BONUS_BLKID (-1ULL)
#define DMU_SPILL_BLKID (-2ULL)
/*
* Public routines to create, destroy, open, and close objsets.
*/
typedef void dmu_objset_create_sync_func_t(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
int dmu_objset_hold(const char *name, const void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t key_required, const void *tag,
objset_t **osp);
void dmu_objset_rele(objset_t *os, const void *tag);
void dmu_objset_disown(objset_t *os, boolean_t key_required, const void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);
void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
struct dsl_crypto_params *dcp, dmu_objset_create_sync_func_t func,
void *arg);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_find(const char *name, int func(const char *, void *), void *arg,
int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive);
typedef struct dmu_buf {
uint64_t db_object; /* object that this buffer is part of */
uint64_t db_offset; /* byte offset in this object */
uint64_t db_size; /* size of buffer in bytes */
void *db_data; /* data in buffer */
} dmu_buf_t;
/*
* The names of zap entries in the DIRECTORY_OBJECT of the MOS.
*/
#define DMU_POOL_DIRECTORY_OBJECT 1
#define DMU_POOL_CONFIG "config"
#define DMU_POOL_FEATURES_FOR_WRITE "features_for_write"
#define DMU_POOL_FEATURES_FOR_READ "features_for_read"
#define DMU_POOL_FEATURE_DESCRIPTIONS "feature_descriptions"
#define DMU_POOL_FEATURE_ENABLED_TXG "feature_enabled_txg"
#define DMU_POOL_ROOT_DATASET "root_dataset"
#define DMU_POOL_SYNC_BPOBJ "sync_bplist"
#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub"
#define DMU_POOL_ERRLOG_LAST "errlog_last"
#define DMU_POOL_SPARES "spares"
#define DMU_POOL_DEFLATE "deflate"
#define DMU_POOL_HISTORY "history"
#define DMU_POOL_PROPS "pool_props"
#define DMU_POOL_L2CACHE "l2cache"
#define DMU_POOL_TMP_USERREFS "tmp_userrefs"
#define DMU_POOL_DDT "DDT-%s-%s-%s"
#define DMU_POOL_DDT_STATS "DDT-statistics"
#define DMU_POOL_CREATION_VERSION "creation_version"
#define DMU_POOL_SCAN "scan"
#define DMU_POOL_FREE_BPOBJ "free_bpobj"
#define DMU_POOL_BPTREE_OBJ "bptree_obj"
#define DMU_POOL_EMPTY_BPOBJ "empty_bpobj"
#define DMU_POOL_CHECKSUM_SALT "org.illumos:checksum_salt"
#define DMU_POOL_VDEV_ZAP_MAP "com.delphix:vdev_zap_map"
#define DMU_POOL_REMOVING "com.delphix:removing"
#define DMU_POOL_OBSOLETE_BPOBJ "com.delphix:obsolete_bpobj"
#define DMU_POOL_CONDENSING_INDIRECT "com.delphix:condensing_indirect"
#define DMU_POOL_ZPOOL_CHECKPOINT "com.delphix:zpool_checkpoint"
#define DMU_POOL_LOG_SPACEMAP_ZAP "com.delphix:log_spacemap_zap"
#define DMU_POOL_DELETED_CLONES "com.delphix:deleted_clones"
/*
* Allocate an object from this objset. The range of object numbers
* available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode.
*
* The transaction must be assigned to a txg. The newly allocated
* object will be "held" in the transaction (ie. you can modify the
* newly allocated object in this transaction).
*
* dmu_object_alloc() chooses an object and returns it in *objectp.
*
* dmu_object_claim() allocates a specific object number. If that
* number is already allocated, it fails and returns EEXIST.
*
* Return 0 on success, or ENOSPC or EEXIST as specified above.
*/
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
uint64_t dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
uint64_t dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len,
int dnodesize, dmu_tx_t *tx);
uint64_t dmu_object_alloc_hold(objset_t *os, dmu_object_type_t ot,
int blocksize, int indirect_blockshift, dmu_object_type_t bonustype,
int bonuslen, int dnodesize, dnode_t **allocated_dnode, const void *tag,
dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len,
int dnodesize, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
int dmu_object_reclaim_dnsize(objset_t *os, uint64_t object,
dmu_object_type_t ot, int blocksize, dmu_object_type_t bonustype,
int bonuslen, int dnodesize, boolean_t keep_spill, dmu_tx_t *tx);
int dmu_object_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx);
/*
* Free an object from this objset.
*
* The object's data will be freed as well (ie. you don't need to call
* dmu_free(object, 0, -1, tx)).
*
* The object need not be held in the transaction.
*
* If there are any holds on this object's buffers (via dmu_buf_hold()),
* or tx holds on the object (via dmu_tx_hold_object()), you can not
* free it; it fails and returns EBUSY.
*
* If the object is not allocated, it fails and returns ENOENT.
*
* Return 0 on success, or EBUSY or ENOENT as specified above.
*/
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);
/*
* Find the next allocated or free object.
*
* The objectp parameter is in-out. It will be updated to be the next
* object which is allocated. Ignore objects which have not been
* modified since txg.
*
* XXX Can only be called on an objset with no dirty data.
*
* Returns 0 on success, or ENOENT if there are no more objects.
*/
int dmu_object_next(objset_t *os, uint64_t *objectp,
boolean_t hole, uint64_t txg);
/*
* Set the number of levels on a dnode. nlevels must be greater than the
* current number of levels or an EINVAL will be returned.
*/
int dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels,
dmu_tx_t *tx);
/*
* Set the data blocksize for an object.
*
* The object cannot have any blocks allocated beyond the first. If
* the first block is allocated already, the new size must be greater
* than the current block size. If these conditions are not met,
* ENOTSUP will be returned.
*
* Returns 0 on success, or EBUSY if there are any holds on the object
* contents, or ENOTSUP as described above.
*/
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
int ibs, dmu_tx_t *tx);
/*
* Manually set the maxblkid on a dnode. This will adjust nlevels accordingly
* to accommodate the change. When calling this function, the caller must
* ensure that the object's nlevels can sufficiently support the new maxblkid.
*/
int dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx);
/*
* Set the checksum property on a dnode. The new checksum algorithm will
* apply to all newly written blocks; existing blocks will not be affected.
*/
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx);
/*
* Set the compress property on a dnode. The new compression algorithm will
* apply to all newly written blocks; existing blocks will not be affected.
*/
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx);
void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx);
void dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
/*
* Decide how to write a block: checksum, compression, number of copies, etc.
*/
#define WP_NOFILL 0x1
#define WP_DMU_SYNC 0x2
#define WP_SPILL 0x4
void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
struct zio_prop *zp);
/*
* The bonus data is accessed more or less like a regular buffer.
* You must dmu_bonus_hold() to get the buffer, which will give you a
* dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
* data. As with any normal buffer, you must call dmu_buf_will_dirty()
* before modifying it, and the
* object must be held in an assigned transaction before calling
* dmu_buf_will_dirty. You may use dmu_buf_set_user() on the bonus
* buffer as well. You must release what you hold with dmu_buf_rele().
*
* Returns ENOENT, EIO, or 0.
*/
int dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag,
dmu_buf_t **dbp);
int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
uint32_t flags);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
/*
* Special spill buffer support used by "SA" framework
*/
int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
const void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp);
/*
* Obtain the DMU buffer from the specified object which contains the
* specified offset. dmu_buf_hold() puts a "hold" on the buffer, so
* that it will remain in memory. You must release the hold with
* dmu_buf_rele(). You must not access the dmu_buf_t after releasing
* what you hold. You must have a hold on any dmu_buf_t* you pass to the DMU.
*
* You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
* on the returned buffer before reading or writing the buffer's
* db_data. The comments for those routines describe what particular
* operations are valid after calling them.
*
* The object number must be a valid, allocated object number.
*/
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
const void *tag, dmu_buf_t **, int flags);
int dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, int read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp);
int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
const void *tag, dmu_buf_t **dbp, int flags);
int dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
uint64_t length, boolean_t read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp, uint32_t flags);
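/*
 * A minimal sketch of a read-modify-write of one block via dmu_buf_hold(),
 * assuming a transaction that is already assigned and holds the object,
 * and a range that stays within the held block. The helper name is
 * hypothetical.
 */
static inline int
dmu_buf_rmw_sketch(objset_t *os, uint64_t object, uint64_t offset,
    const void *src, uint64_t len, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	int error;

	error = dmu_buf_hold(os, object, offset, FTAG, &db,
	    DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);
	dmu_buf_will_dirty(db, tx);	/* declare intent to modify db_data */
	memcpy((char *)db->db_data + (offset - db->db_offset), src, len);
	dmu_buf_rele(db, FTAG);
	return (0);
}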
/*
* Add a reference to a dmu buffer that has already been held via
* dmu_buf_hold() in the current context.
*/
void dmu_buf_add_ref(dmu_buf_t *db, const void *tag);
/*
* Attempt to add a reference to a dmu buffer that is in an unknown state,
* using a pointer that may have been invalidated by eviction processing.
* The request will succeed if the passed in dbuf still represents the
* same os/object/blkid, is ineligible for eviction, and has at least
* one hold by a user other than the syncer.
*/
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
uint64_t blkid, const void *tag);
void dmu_buf_rele(dmu_buf_t *db, const void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);
uint64_t dmu_buf_user_refcount(dmu_buf_t *db);
/*
* dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
* range of an object. A pointer to an array of dmu_buf_t*'s is
* returned (in *dbpp).
*
* dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
* frees the array. The hold on the array of buffers MUST be released
* with dmu_buf_rele_array. You can NOT release the hold on each buffer
* individually with dmu_buf_rele.
*/
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
uint64_t length, boolean_t read, const void *tag,
int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, const void *tag);
typedef void dmu_buf_evict_func_t(void *user_ptr);
/*
* A DMU buffer user object may be associated with a dbuf for the
* duration of its lifetime. This allows the user of a dbuf (client)
* to attach private data to a dbuf (e.g. in-core only data such as a
* dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
* when that dbuf has been evicted. Clients typically respond to the
* eviction notification by freeing their private data, thus ensuring
* the same lifetime for both dbuf and private data.
*
* The mapping from a dmu_buf_user_t to any client private data is the
* client's responsibility. All current consumers of the API with private
* data embed a dmu_buf_user_t as the first member of the structure for
* their private data. This allows conversions between the two types
* with a simple cast. Since the DMU buf user API never needs access
* to the private data, other strategies can be employed if necessary
* or convenient for the client (e.g. using container_of() to do the
* conversion for private data that cannot have the dmu_buf_user_t as
* its first member).
*
* Eviction callbacks are executed without the dbuf mutex held or any
* other type of mechanism to guarantee that the dbuf is still available.
* For this reason, users must assume the dbuf has already been freed
* and not reference the dbuf from the callback context.
*
* Users requesting "immediate eviction" are notified as soon as the dbuf
* is only referenced by dirty records (dirties == holds). Otherwise the
* notification occurs after eviction processing for the dbuf begins.
*/
typedef struct dmu_buf_user {
/*
* Asynchronous user eviction callback state.
*/
taskq_ent_t dbu_tqent;
/*
* This instance's eviction function pointers.
*
* dbu_evict_func_sync is called synchronously and then
* dbu_evict_func_async is executed asynchronously on a taskq.
*/
dmu_buf_evict_func_t *dbu_evict_func_sync;
dmu_buf_evict_func_t *dbu_evict_func_async;
#ifdef ZFS_DEBUG
/*
* Pointer to user's dbuf pointer. NULL for clients that do
* not associate a dbuf with their user data.
*
* The dbuf pointer is cleared upon eviction so as to catch
* use-after-evict bugs in clients.
*/
dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;
/*
* Initialize the given dmu_buf_user_t instance with the eviction function
* evict_func, to be called when the user is evicted.
*
* NOTE: This function should only be called once on a given dmu_buf_user_t.
* To allow enforcement of this, dbu must already be zeroed on entry.
*/
static inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
dmu_buf_evict_func_t *evict_func_async,
dmu_buf_t **clear_on_evict_dbufp __maybe_unused)
{
ASSERT(dbu->dbu_evict_func_sync == NULL);
ASSERT(dbu->dbu_evict_func_async == NULL);
/* must have at least one evict func */
IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
dbu->dbu_evict_func_sync = evict_func_sync;
dbu->dbu_evict_func_async = evict_func_async;
taskq_init_ent(&dbu->dbu_tqent);
#ifdef ZFS_DEBUG
dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
#endif
}
/*
* Attach user data to a dbuf and mark it for normal (when the dbuf's
* data is cleared or its reference count goes to zero) eviction processing.
*
* Returns NULL on success, or the existing user if another user currently
* owns the buffer.
*/
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);
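/*
 * A minimal sketch of a client structure that embeds dmu_buf_user_t as its
 * first member, as described above, and attaches itself to a dbuf. The
 * eviction callback receives the dmu_buf_user_t pointer, which the
 * first-member layout lets us cast back. All names are hypothetical.
 */
typedef struct dbuf_user_sketch {
	dmu_buf_user_t	dus_dbu;	/* must be first for the simple cast */
	uint64_t	dus_private;
} dbuf_user_sketch_t;

static void
dbuf_user_sketch_evict_sync(void *arg)
{
	dbuf_user_sketch_t *dus = arg;

	/* the dbuf may already be freed; touch only our own state */
	kmem_free(dus, sizeof (*dus));
}

static inline void
dbuf_user_sketch_attach(dmu_buf_t *db)
{
	dbuf_user_sketch_t *dus = kmem_zalloc(sizeof (*dus), KM_SLEEP);

	dmu_buf_init_user(&dus->dus_dbu, dbuf_user_sketch_evict_sync,
	    NULL, NULL);
	if (dmu_buf_set_user(db, &dus->dus_dbu) != NULL)
		kmem_free(dus, sizeof (*dus));	/* another user already owns it */
}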
/*
* Attach user data to a dbuf and mark it for immediate (its dirty and
* reference counts are equal) eviction processing.
*
* Returns NULL on success, or the existing user if another user currently
* owns the buffer.
*/
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);
/*
* Replace the current user of a dbuf.
*
* If given the current user of a dbuf, replaces the dbuf's user with
* "new_user" and returns the user data pointer that was replaced.
* Otherwise returns the current, and unmodified, dbuf user pointer.
*/
void *dmu_buf_replace_user(dmu_buf_t *db,
dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);
/*
* Remove the specified user data for a DMU buffer.
*
* Returns the user that was removed on success, or the current user if
* another user currently owns the buffer.
*/
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);
/*
* Returns the user data (dmu_buf_user_t *) associated with this dbuf.
*/
void *dmu_buf_get_user(dmu_buf_t *db);
objset_t *dmu_buf_get_objset(dmu_buf_t *db);
dnode_t *dmu_buf_dnode_enter(dmu_buf_t *db);
void dmu_buf_dnode_exit(dmu_buf_t *db);
/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);
/*
* Returns the blkptr associated with this dbuf, or NULL if not set.
*/
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);
/*
* Indicate that you are going to modify the buffer's data (db_data).
*
* The transaction (tx) must be assigned to a txg (ie. you've called
* dmu_tx_assign()). The buffer's object must be held in the tx
* (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
*/
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
boolean_t dmu_buf_is_dirty(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx);
/*
* You must create a transaction, then hold the objects which you will
* (or might) modify as part of this transaction. Then you must assign
* the transaction to a transaction group. Once the transaction has
* been assigned, you can modify buffers which belong to held objects as
* part of this transaction. You can't modify buffers before the
* transaction has been assigned; you can't modify buffers which don't
* belong to objects which this transaction holds; you can't hold
* objects once the transaction has been assigned. You may hold an
* object which you are going to free (with dmu_object_free()), but you
* don't have to.
*
* You can abort the transaction before it has been assigned.
*
* Note that you may hold buffers (with dmu_buf_hold) at any time,
* regardless of transaction state.
*/
#define DMU_NEW_OBJECT (-1ULL)
#define DMU_OBJECT_END (-1ULL)
dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
uint64_t len);
void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
uint64_t len);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
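/*
 * A minimal sketch of the create/hold/assign/commit lifecycle described
 * above, writing one range of an object. The helper name is hypothetical.
 */
static inline int
dmu_tx_write_sketch(objset_t *os, uint64_t object, uint64_t off, int len,
    const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int error;

	dmu_tx_hold_write(tx, object, off, len);	/* declare the modification */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);	/* unassigned tx must be aborted */
		return (error);
	}
	dmu_write(os, object, off, len, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}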
/*
* To register a commit callback, dmu_tx_callback_register() must be called.
*
* dcb_data is a pointer to caller private data that is passed on as a
* callback parameter. The caller is responsible for properly allocating and
* freeing it.
*
* When registering a callback, the transaction must be already created, but
* it cannot be committed or aborted. It can be assigned to a txg or not.
*
* The callback will be called after the transaction has been safely written
* to stable storage and will also be called if the dmu_tx is aborted.
* If there is any error which prevents the transaction from being committed to
* disk, the callback will be called with a value of error != 0.
*
* When multiple callbacks are registered to the transaction, the callbacks
* will be called in reverse order to let Lustre, the only user of commit
* callback currently, take the fast path of its commit callback handling.
*/
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);
void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
void *dcb_data);
void dmu_tx_do_callbacks(list_t *cb_list, int error);
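/*
 * A minimal sketch of registering a commit callback on a transaction that
 * has been created but not yet committed or aborted, as described above.
 * All names are hypothetical.
 */
static void
commit_cb_sketch(void *dcb_data, int error)
{
	/* error != 0 means the transaction never made it to stable storage */
	kmem_free(dcb_data, sizeof (uint64_t));
}

static inline void
commit_cb_register_sketch(dmu_tx_t *tx)
{
	uint64_t *state = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);

	dmu_tx_callback_register(tx, commit_cb_sketch, state);
}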
/*
* Free up the data blocks for a defined range of a file. If size is
* -1, the range from offset to end-of-file is freed.
*/
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);
/*
* Convenience functions.
*
* Canfail routines will return 0 on success, or an errno if there is a
* nonrecoverable I/O error.
*/
#define DMU_READ_PREFETCH 0 /* prefetch */
#define DMU_READ_NO_PREFETCH 1 /* don't prefetch */
#define DMU_READ_NO_DECRYPT 2 /* don't decrypt */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags);
int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
#ifdef _KERNEL
int dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size);
int dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx);
#endif
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
extern int zfs_max_recordsize;
/*
* Asynchronously try to read in the data.
*/
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, enum zio_priority pri);
typedef struct dmu_object_info {
/* All sizes are in bytes unless otherwise indicated. */
uint32_t doi_data_block_size;
uint32_t doi_metadata_block_size;
dmu_object_type_t doi_type;
dmu_object_type_t doi_bonus_type;
uint64_t doi_bonus_size;
uint8_t doi_indirection; /* 2 = dnode->indirect->data */
uint8_t doi_checksum;
uint8_t doi_compress;
uint8_t doi_nblkptr;
uint8_t doi_pad[4];
uint64_t doi_dnodesize;
uint64_t doi_physical_blocks_512; /* data + metadata, 512b blks */
uint64_t doi_max_offset;
uint64_t doi_fill_count; /* number of non-empty blocks */
} dmu_object_info_t;
typedef void (*const arc_byteswap_func_t)(void *buf, size_t size);
typedef struct dmu_object_type_info {
dmu_object_byteswap_t ot_byteswap;
boolean_t ot_metadata;
boolean_t ot_dbuf_metadata_cache;
boolean_t ot_encrypt;
const char *ot_name;
} dmu_object_type_info_t;
typedef const struct dmu_object_byteswap_info {
arc_byteswap_func_t ob_func;
const char *ob_name;
} dmu_object_byteswap_info_t;
extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];
/*
* Get information on a DMU object.
*
* Return 0 on success or ENOENT if object is not allocated.
*
* If doi is NULL, just indicates whether the object exists.
*/
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
void __dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
* Like dmu_object_info_from_db, but faster still when you only care about
* the size.
*/
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
u_longlong_t *nblk512);
void dmu_object_dnsize_from_db(dmu_buf_t *db, int *dnsize);
typedef struct dmu_objset_stats {
uint64_t dds_num_clones; /* number of clones of this */
uint64_t dds_creation_txg;
uint64_t dds_guid;
dmu_objset_type_t dds_type;
uint8_t dds_is_snapshot;
uint8_t dds_inconsistent;
uint8_t dds_redacted;
char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;
/*
* Get stats on a dataset.
*/
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
/*
* Add entries to the nvlist for all the objset's properties. See
* zfs_prop_table[] and zfs(1m) for details on the properties.
*/
void dmu_objset_stats(objset_t *os, struct nvlist *nv);
/*
* Get the space usage statistics for statvfs().
*
* refdbytes is the amount of space "referenced" by this objset.
* availbytes is the amount of space available to this objset, taking
* into account quotas & reservations, assuming that no other objsets
* use the space first. These values correspond to the 'referenced' and
* 'available' properties, described in the zfs(1m) manpage.
*
* usedobjs and availobjs are the number of objects currently allocated,
* and available.
*/
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp);
/*
* The fsid_guid is a 56-bit ID that can change to avoid collisions.
* (Contrast with the ds_guid which is a 64-bit ID that will never
* change, so there is a small probability that it will collide.)
*/
uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
* Get the [cm]time for an objset's snapshot dir
*/
inode_timespec_t dmu_objset_snap_cmtime(objset_t *os);
int dmu_objset_is_snapshot(objset_t *os);
extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern uint64_t dmu_objset_dnodesize(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_objset_blksize(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *val);
extern int dmu_snapshot_realname(objset_t *os, const char *name, char *real,
int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp);
typedef struct zfs_file_info {
uint64_t zfi_user;
uint64_t zfi_group;
uint64_t zfi_project;
uint64_t zfi_generation;
} zfs_file_info_t;
typedef int file_info_cb_t(dmu_object_type_t bonustype, const void *data,
struct zfs_file_info *zoi);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
file_info_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);
/*
* Return the txg number for the given assigned transaction.
*/
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
/*
* Synchronous write.
* If a parent zio is provided this function initiates a write on the
* provided buffer as a child of the parent zio.
* In the absence of a parent zio, the write is completed synchronously.
* At write completion, blk is filled with the bp of the written block.
* Note that while the data covered by this function will be on stable
* storage when the write completes, this new data does not become a
* permanent part of the file until the associated transaction commits.
*/
/*
* {zfs,zvol,ztest}_get_done() args
*/
typedef struct zgd {
struct lwb *zgd_lwb;
struct blkptr *zgd_bp;
dmu_buf_t *zgd_db;
struct zfs_locked_range *zgd_lr;
void *zgd_private;
} zgd_t;
typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);
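/*
 * Editor's note: hedged sketch of the dmu_sync() completion callback shape
 * described above. The real callbacks are {zfs,zvol,ztest}_get_done(); the
 * name below is hypothetical. Real callers also drop zgd_lr through the
 * range-lock API before freeing the zgd.
 */
#if 0	/* illustration only */
static void
example_get_done(zgd_t *zgd, int error)
{
	(void) error;
	if (zgd->zgd_db != NULL)
		dmu_buf_rele(zgd->zgd_db, zgd);
	kmem_free(zgd, sizeof (zgd_t));
}
#endif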
/*
* Find the next hole or data block in the file, starting at *off.
* Return found offset in *off. Return ESRCH for end of file.
*/
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
uint64_t *off);
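/*
 * Editor's note: illustrative wrapper over dmu_offset_next(), in the spirit
 * of a SEEK_HOLE implementation; error handling is reduced to the ESRCH
 * end-of-file case documented above. The name and the ENXIO mapping are
 * assumptions.
 */
#if 0	/* illustration only */
static int
example_seek_hole(objset_t *os, uint64_t object, uint64_t *off)
{
	int error = dmu_offset_next(os, object, B_TRUE /* hole */, off);

	if (error == ESRCH)
		return (ENXIO);	/* no hole at or past *off */
	return (error);
}
#endif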
/*
* Initial setup and final teardown.
*/
extern void dmu_init(void);
extern void dmu_fini(void);
typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
dmu_traverse_cb_t cb, void *arg);
int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
zfs_file_t *fp, offset_t *offp);
/* CRC64 table */
#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];
extern int dmu_prefetch_max;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DMU_H */
diff --git a/sys/contrib/openzfs/include/sys/dsl_dataset.h b/sys/contrib/openzfs/include/sys/dsl_dataset.h
index 36307c63151f..3450527af7e0 100644
--- a/sys/contrib/openzfs/include/sys/dsl_dataset.h
+++ b/sys/contrib/openzfs/include/sys/dsl_dataset.h
@@ -1,513 +1,523 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DSL_DATASET_H
#define _SYS_DSL_DATASET_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/bplist.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_context.h>
#include <sys/dsl_deadlist.h>
#include <sys/zfs_refcount.h>
#include <sys/rrwlock.h>
#include <sys/dsl_crypt.h>
#include <zfeature_common.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_dataset;
struct dsl_dir;
struct dsl_pool;
struct dsl_crypto_params;
struct dsl_key_mapping;
struct zfs_bookmark_phys;
#define DS_FLAG_INCONSISTENT (1ULL<<0)
#define DS_IS_INCONSISTENT(ds) \
(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT)
/*
* Do not allow this dataset to be promoted.
*/
#define DS_FLAG_NOPROMOTE (1ULL<<1)
/*
* DS_FLAG_UNIQUE_ACCURATE is set if ds_unique_bytes has been correctly
* calculated for head datasets (starting with SPA_VERSION_UNIQUE_ACCURATE,
* refquota/refreservations).
*/
#define DS_FLAG_UNIQUE_ACCURATE (1ULL<<2)
/*
* DS_FLAG_DEFER_DESTROY is set after 'zfs destroy -d' has been called
* on a dataset. This allows the dataset to be destroyed using 'zfs release'.
*/
#define DS_FLAG_DEFER_DESTROY (1ULL<<3)
#define DS_IS_DEFER_DESTROY(ds) \
(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_DEFER_DESTROY)
/*
* DS_FIELD_* are strings that are used in the "extensified" dataset zap object.
* They should be of the format <reverse-dns>:<field>.
*/
/*
* This field's value is the object ID of a zap object which contains the
* bookmarks of this dataset. If it is present, then this dataset is counted
* in the refcount of the SPA_FEATURE_BOOKMARKS feature.
*/
#define DS_FIELD_BOOKMARK_NAMES "com.delphix:bookmarks"
/*
* This field is present (with value=0) if this dataset may contain large
* dnodes (>512B). If it is present, then this dataset is counted in the
* refcount of the SPA_FEATURE_LARGE_DNODE feature.
*/
#define DS_FIELD_LARGE_DNODE "org.zfsonlinux:large_dnode"
/*
* These fields are set on datasets that are in the middle of a resumable
* receive, and allow the sender to resume the send if it is interrupted.
*/
#define DS_FIELD_RESUME_FROMGUID "com.delphix:resume_fromguid"
#define DS_FIELD_RESUME_TONAME "com.delphix:resume_toname"
#define DS_FIELD_RESUME_TOGUID "com.delphix:resume_toguid"
#define DS_FIELD_RESUME_OBJECT "com.delphix:resume_object"
#define DS_FIELD_RESUME_OFFSET "com.delphix:resume_offset"
#define DS_FIELD_RESUME_BYTES "com.delphix:resume_bytes"
#define DS_FIELD_RESUME_LARGEBLOCK "com.delphix:resume_largeblockok"
#define DS_FIELD_RESUME_EMBEDOK "com.delphix:resume_embedok"
#define DS_FIELD_RESUME_COMPRESSOK "com.delphix:resume_compressok"
#define DS_FIELD_RESUME_RAWOK "com.datto:resume_rawok"
/*
* This field is set to the object number of the remap deadlist if one exists.
*/
#define DS_FIELD_REMAP_DEADLIST "com.delphix:remap_deadlist"
/*
* We were receiving an incremental from a redaction bookmark, and these are the
* guids of its snapshots.
*/
#define DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS \
"com.delphix:resume_redact_book_snaps"
/*
* This field is set to the ivset guid for encrypted snapshots. This is used
* for validating raw receives.
*/
#define DS_FIELD_IVSET_GUID "com.datto:ivset_guid"
/*
* DS_FLAG_CI_DATASET is set if the dataset contains a file system whose
* name lookups should be performed case-insensitively.
*/
#define DS_FLAG_CI_DATASET (1ULL<<16)
#define DS_CREATE_FLAG_NODIRTY (1ULL<<24)
typedef struct dsl_dataset_phys {
uint64_t ds_dir_obj; /* DMU_OT_DSL_DIR */
uint64_t ds_prev_snap_obj; /* DMU_OT_DSL_DATASET */
uint64_t ds_prev_snap_txg;
uint64_t ds_next_snap_obj; /* DMU_OT_DSL_DATASET */
uint64_t ds_snapnames_zapobj; /* DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */
uint64_t ds_num_children; /* clone/snap children; ==0 for head */
uint64_t ds_creation_time; /* seconds since 1970 */
uint64_t ds_creation_txg;
uint64_t ds_deadlist_obj; /* DMU_OT_DEADLIST */
/*
* ds_referenced_bytes, ds_compressed_bytes, and ds_uncompressed_bytes
* include all blocks referenced by this dataset, including those
* shared with any other datasets.
*/
uint64_t ds_referenced_bytes;
uint64_t ds_compressed_bytes;
uint64_t ds_uncompressed_bytes;
uint64_t ds_unique_bytes; /* only relevant to snapshots */
/*
* The ds_fsid_guid is a 56-bit ID that can change to avoid
* collisions. The ds_guid is a 64-bit ID that will never
* change, so there is a small probability that it will collide.
*/
uint64_t ds_fsid_guid;
uint64_t ds_guid;
uint64_t ds_flags; /* DS_FLAG_* */
blkptr_t ds_bp;
uint64_t ds_next_clones_obj; /* DMU_OT_DSL_CLONES */
uint64_t ds_props_obj; /* DMU_OT_DSL_PROPS for snaps */
uint64_t ds_userrefs_obj; /* DMU_OT_USERREFS */
uint64_t ds_pad[5]; /* pad out to 320 bytes for good measure */
} dsl_dataset_phys_t;
typedef struct dsl_dataset {
dmu_buf_user_t ds_dbu;
rrwlock_t ds_bp_rwlock; /* Protects ds_phys->ds_bp */
/* Immutable: */
struct dsl_dir *ds_dir;
dmu_buf_t *ds_dbuf;
uint64_t ds_object;
uint64_t ds_fsid_guid;
boolean_t ds_is_snapshot;
struct dsl_key_mapping *ds_key_mapping;
/* only used in syncing context, only valid for non-snapshots: */
struct dsl_dataset *ds_prev;
uint64_t ds_bookmarks_obj; /* DMU_OTN_ZAP_METADATA */
avl_tree_t ds_bookmarks; /* dsl_bookmark_node_t */
/* has internal locking: */
dsl_deadlist_t ds_deadlist;
bplist_t ds_pending_deadlist;
/*
* The remap deadlist contains blocks (DVA's, really) that are
* referenced by the previous snapshot and point to indirect vdevs,
* but in this dataset they have been remapped to point to concrete
* (or at least, less-indirect) vdevs. In other words, the
* physical DVA is referenced by the previous snapshot but not by
* this dataset. Logically, the DVA continues to be referenced,
* but we are using a different (less indirect) physical DVA.
* This deadlist is used to determine when physical DVAs that
* point to indirect vdevs are no longer referenced anywhere,
* and thus should be marked obsolete.
*
* This is only used if SPA_FEATURE_OBSOLETE_COUNTS is enabled.
*/
dsl_deadlist_t ds_remap_deadlist;
/* protects creation of the ds_remap_deadlist */
kmutex_t ds_remap_deadlist_lock;
/* protected by lock on pool's dp_dirty_datasets list */
txg_node_t ds_dirty_link;
list_node_t ds_synced_link;
/*
* ds_phys->ds_<accounting> is also protected by ds_lock.
* Protected by ds_lock:
*/
kmutex_t ds_lock;
objset_t *ds_objset;
uint64_t ds_userrefs;
const void *ds_owner;
/*
* Long holds prevent the ds from being destroyed; they allow the
* ds to remain held even after dropping the dp_config_rwlock.
* Owning counts as a long hold. See the comments above
* dsl_pool_hold() for details.
*/
zfs_refcount_t ds_longholds;
/* no locking; only for making guesses */
uint64_t ds_trysnap_txg;
/* for objset_open() */
kmutex_t ds_opening_lock;
uint64_t ds_reserved; /* cached refreservation */
uint64_t ds_quota; /* cached refquota */
kmutex_t ds_sendstream_lock;
list_t ds_sendstreams;
/*
* When in the middle of a resumable receive, tracks how much
* progress we have made.
*/
uint64_t ds_resume_object[TXG_SIZE];
uint64_t ds_resume_offset[TXG_SIZE];
uint64_t ds_resume_bytes[TXG_SIZE];
/* Protected by our dsl_dir's dd_lock */
list_t ds_prop_cbs;
/*
* For ZFEATURE_FLAG_PER_DATASET features, set if this dataset
* uses this feature.
*/
void *ds_feature[SPA_FEATURES];
/*
* Set if we need to activate the feature on this dataset this txg
* (used only in syncing context).
*/
void *ds_feature_activation[SPA_FEATURES];
/* Protected by ds_lock; keep at end of struct for better locality */
char ds_snapname[ZFS_MAX_DATASET_NAME_LEN];
} dsl_dataset_t;
static inline dsl_dataset_phys_t *
dsl_dataset_phys(dsl_dataset_t *ds)
{
return ((dsl_dataset_phys_t *)ds->ds_dbuf->db_data);
}
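/*
 * Editor's note: small sketch of reading on-disk fields through the
 * dsl_dataset_phys() accessor above; callers are expected to hold the
 * dataset (and ds_bp_rwlock when touching ds_bp). Illustration only.
 */
#if 0
static uint64_t
example_creation_txg(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_creation_txg);
}
#endif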
typedef struct dsl_dataset_promote_arg {
const char *ddpa_clonename;
dsl_dataset_t *ddpa_clone;
list_t shared_snaps, origin_snaps, clone_snaps;
dsl_dataset_t *origin_origin; /* origin of the origin */
uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
nvlist_t *err_ds;
cred_t *cr;
proc_t *proc;
} dsl_dataset_promote_arg_t;
typedef struct dsl_dataset_rollback_arg {
const char *ddra_fsname;
const char *ddra_tosnap;
void *ddra_owner;
nvlist_t *ddra_result;
} dsl_dataset_rollback_arg_t;
typedef struct dsl_dataset_snapshot_arg {
nvlist_t *ddsa_snaps;
nvlist_t *ddsa_props;
nvlist_t *ddsa_errors;
cred_t *ddsa_cr;
proc_t *ddsa_proc;
} dsl_dataset_snapshot_arg_t;
+typedef struct dsl_dataset_rename_snapshot_arg {
+ const char *ddrsa_fsname;
+ const char *ddrsa_oldsnapname;
+ const char *ddrsa_newsnapname;
+ boolean_t ddrsa_recursive;
+ dmu_tx_t *ddrsa_tx;
+} dsl_dataset_rename_snapshot_arg_t;
+
/*
* The max length of a temporary tag prefix is the number of hex digits
* required to express UINT64_MAX plus one for the hyphen.
*/
#define MAX_TAG_PREFIX_LEN 17
#define dsl_dataset_is_snapshot(ds) \
(dsl_dataset_phys(ds)->ds_num_children != 0)
#define DS_UNIQUE_IS_ACCURATE(ds) \
((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_UNIQUE_ACCURATE) != 0)
/* flags for holding the dataset */
typedef enum ds_hold_flags {
DS_HOLD_FLAG_NONE = 0 << 0,
DS_HOLD_FLAG_DECRYPT = 1 << 0 /* needs access to encrypted data */
} ds_hold_flags_t;
int dsl_dataset_hold(struct dsl_pool *dp, const char *name, const void *tag,
dsl_dataset_t **dsp);
int dsl_dataset_hold_flags(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp);
boolean_t dsl_dataset_try_add_ref(struct dsl_pool *dp, dsl_dataset_t *ds,
const void *tag);
int dsl_dataset_create_key_mapping(dsl_dataset_t *ds);
int dsl_dataset_hold_obj_flags(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **);
void dsl_dataset_remove_key_mapping(dsl_dataset_t *ds);
int dsl_dataset_hold_obj(struct dsl_pool *dp, uint64_t dsobj,
const void *tag, dsl_dataset_t **);
void dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
const void *tag);
void dsl_dataset_rele(dsl_dataset_t *ds, const void *tag);
int dsl_dataset_own(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_force(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_obj(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_obj_force(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp);
void dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags,
const void *tag);
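/*
 * Editor's note: hedged sketch of the hold/rele pairing for the functions
 * declared above. Real callers typically get the dsl_pool_t from
 * dsl_pool_hold(), which also takes the pool config lock; the dataset name
 * below is hypothetical.
 */
#if 0	/* illustration only */
static int
example_hold_rele(struct dsl_pool *dp)
{
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, "pool/fs@snap", FTAG, &ds);
	if (error != 0)
		return (error);
	/* ... inspect the dataset while the hold is live ... */
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
#endif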
void dsl_dataset_name(dsl_dataset_t *ds, char *name);
boolean_t dsl_dataset_tryown(dsl_dataset_t *ds, const void *tag,
boolean_t override);
int dsl_dataset_namelen(dsl_dataset_t *ds);
boolean_t dsl_dataset_has_owner(dsl_dataset_t *ds);
uint64_t dsl_dataset_create_sync(dsl_dir_t *pds, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *,
struct dsl_crypto_params *, dmu_tx_t *);
uint64_t dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
struct dsl_crypto_params *dcp, uint64_t flags, dmu_tx_t *tx);
void dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx);
int dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors);
void dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_promote_check(void *arg, dmu_tx_t *tx);
int dsl_dataset_promote(const char *name, char *conflsnap);
int dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive);
int dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag);
blkptr_t *dsl_dataset_get_blkptr(dsl_dataset_t *ds);
spa_t *dsl_dataset_get_spa(dsl_dataset_t *ds);
boolean_t dsl_dataset_modified_since_snap(dsl_dataset_t *ds,
dsl_dataset_t *snap);
void dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx);
void dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx);
-void dsl_dataset_feature_set_activation(const blkptr_t *bp, dsl_dataset_t *ds);
void dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp,
dmu_tx_t *tx);
int dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp,
dmu_tx_t *tx, boolean_t async);
void dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev,
uint64_t offset, uint64_t size, uint64_t birth, dmu_tx_t *tx);
int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name,
uint64_t *value);
void dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx);
int get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val);
char *get_receive_resume_token(dsl_dataset_t *ds);
uint64_t dsl_get_refratio(dsl_dataset_t *ds);
uint64_t dsl_get_logicalreferenced(dsl_dataset_t *ds);
uint64_t dsl_get_compressratio(dsl_dataset_t *ds);
uint64_t dsl_get_used(dsl_dataset_t *ds);
uint64_t dsl_get_creation(dsl_dataset_t *ds);
uint64_t dsl_get_creationtxg(dsl_dataset_t *ds);
uint64_t dsl_get_refquota(dsl_dataset_t *ds);
uint64_t dsl_get_refreservation(dsl_dataset_t *ds);
uint64_t dsl_get_guid(dsl_dataset_t *ds);
uint64_t dsl_get_unique(dsl_dataset_t *ds);
uint64_t dsl_get_objsetid(dsl_dataset_t *ds);
uint64_t dsl_get_userrefs(dsl_dataset_t *ds);
uint64_t dsl_get_defer_destroy(dsl_dataset_t *ds);
uint64_t dsl_get_referenced(dsl_dataset_t *ds);
uint64_t dsl_get_numclones(dsl_dataset_t *ds);
uint64_t dsl_get_inconsistent(dsl_dataset_t *ds);
uint64_t dsl_get_redacted(dsl_dataset_t *ds);
uint64_t dsl_get_available(dsl_dataset_t *ds);
int dsl_get_written(dsl_dataset_t *ds, uint64_t *written);
int dsl_get_prev_snap(dsl_dataset_t *ds, char *snap);
void dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval);
int dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source);
void get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv);
void dsl_dataset_stats(dsl_dataset_t *os, nvlist_t *nv);
void dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat);
void dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp);
uint64_t dsl_dataset_fsid_guid(dsl_dataset_t *ds);
int dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *newds,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dataset_space_written_bookmark(struct zfs_bookmark_phys *bmp,
dsl_dataset_t *newds, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap, dsl_dataset_t *last,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf);
int dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used,
uint64_t *ref_rsrv);
int dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t quota);
int dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t reservation);
int dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
uint64_t compression);
boolean_t dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg);
void dsl_dataset_long_hold(dsl_dataset_t *ds, const void *tag);
void dsl_dataset_long_rele(dsl_dataset_t *ds, const void *tag);
boolean_t dsl_dataset_long_held(dsl_dataset_t *ds);
int dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx);
void dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx);
int dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc);
void dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx);
void dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx);
void dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds);
int dsl_dataset_get_snapname(dsl_dataset_t *ds);
int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name,
uint64_t *value);
int dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt);
void dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx);
void dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx);
boolean_t dsl_dataset_is_zapified(dsl_dataset_t *ds);
boolean_t dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds);
int dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx);
void dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result);
+int dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx);
+void dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx);
+
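/*
 * Editor's note: hedged sketch of how the newly exported rename-snapshot
 * check/sync pair is typically driven through dsl_sync_task(); the argument
 * values below are hypothetical and mirror dsl_dataset_rename_snapshot().
 */
#if 0	/* illustration only */
static int
example_rename_snapshot(const char *fsname)
{
	dsl_dataset_rename_snapshot_arg_t ddrsa;

	ddrsa.ddrsa_fsname = fsname;
	ddrsa.ddrsa_oldsnapname = "old";
	ddrsa.ddrsa_newsnapname = "new";
	ddrsa.ddrsa_recursive = B_FALSE;

	return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
	    dsl_dataset_rename_snapshot_sync, &ddrsa, 1,
	    ZFS_SPACE_CHECK_RESERVED));
}
#endif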
uint64_t dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds);
void dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx);
boolean_t dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds);
void dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx);
void dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
dmu_tx_t *tx);
void dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f,
dmu_tx_t *tx);
boolean_t dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f);
boolean_t dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds,
spa_feature_t f, uint64_t *outlength, uint64_t **outp);
void dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
uint64_t num_redact_snaps, dmu_tx_t *tx);
int dsl_dataset_oldest_snapshot(spa_t *spa, uint64_t head_ds, uint64_t min_txg,
uint64_t *oldest_dsobj);
#ifdef ZFS_DEBUG
#define dprintf_ds(ds, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); \
dsl_dataset_name(ds, __ds_name); \
dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, ZFS_MAX_DATASET_NAME_LEN); \
} \
} while (0)
#else
#define dprintf_ds(dd, fmt, ...)
#endif
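/*
 * Editor's note: tiny illustration of the dprintf_ds() debug macro above;
 * output is emitted only when ZFS_DEBUG_DPRINTF is set in zfs_flags. The
 * txg expression shown is hypothetical.
 */
#if 0	/* illustration only */
/* dprintf_ds(ds, "dirtying, txg=%llu\n", (u_longlong_t)tx->tx_txg); */
#endif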
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DATASET_H */
diff --git a/sys/contrib/openzfs/include/sys/dsl_dir.h b/sys/contrib/openzfs/include/sys/dsl_dir.h
index 664230f146a2..384f98e8f722 100644
--- a/sys/contrib/openzfs/include/sys/dsl_dir.h
+++ b/sys/contrib/openzfs/include/sys/dsl_dir.h
@@ -1,230 +1,231 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DSL_DIR_H
#define _SYS_DSL_DIR_H
#include <sys/dmu.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_refcount.h>
#include <sys/zfs_context.h>
#include <sys/dsl_crypt.h>
#include <sys/bplist.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_dataset;
struct zthr;
/*
* DD_FIELD_* are strings that are used in the "extensified" dsl_dir zap object.
* They should be of the format <reverse-dns>:<field>.
*/
#define DD_FIELD_FILESYSTEM_COUNT "com.joyent:filesystem_count"
#define DD_FIELD_SNAPSHOT_COUNT "com.joyent:snapshot_count"
#define DD_FIELD_CRYPTO_KEY_OBJ "com.datto:crypto_key_obj"
#define DD_FIELD_LIVELIST "com.delphix:livelist"
+#define DD_FIELD_SNAPSHOTS_CHANGED "com.ixsystems:snapshots_changed"
typedef enum dd_used {
DD_USED_HEAD,
DD_USED_SNAP,
DD_USED_CHILD,
DD_USED_CHILD_RSRV,
DD_USED_REFRSRV,
DD_USED_NUM
} dd_used_t;
#define DD_FLAG_USED_BREAKDOWN (1<<0)
typedef struct dsl_dir_phys {
uint64_t dd_creation_time; /* not actually used */
uint64_t dd_head_dataset_obj;
uint64_t dd_parent_obj;
uint64_t dd_origin_obj;
uint64_t dd_child_dir_zapobj;
/*
* how much space our children are accounting for; for leaf
* datasets, == physical space used by fs + snaps
*/
uint64_t dd_used_bytes;
uint64_t dd_compressed_bytes;
uint64_t dd_uncompressed_bytes;
/* Administrative quota setting */
uint64_t dd_quota;
/* Administrative reservation setting */
uint64_t dd_reserved;
uint64_t dd_props_zapobj;
uint64_t dd_deleg_zapobj; /* dataset delegation permissions */
uint64_t dd_flags;
uint64_t dd_used_breakdown[DD_USED_NUM];
uint64_t dd_clones; /* dsl_dir objects */
uint64_t dd_pad[13]; /* pad out to 256 bytes for good measure */
} dsl_dir_phys_t;
struct dsl_dir {
dmu_buf_user_t dd_dbu;
/* These are immutable; no lock needed: */
uint64_t dd_object;
uint64_t dd_crypto_obj;
dsl_pool_t *dd_pool;
/* Stable until user eviction; no lock needed: */
dmu_buf_t *dd_dbuf;
/* protected by lock on pool's dp_dirty_dirs list */
txg_node_t dd_dirty_link;
/* protected by dp_config_rwlock */
dsl_dir_t *dd_parent;
/* Protected by dd_lock */
kmutex_t dd_lock;
list_t dd_props; /* list of dsl_prop_record_t's */
inode_timespec_t dd_snap_cmtime; /* last snapshot namespace change */
uint64_t dd_origin_txg;
/* gross estimate of space used by in-flight tx's */
uint64_t dd_tempreserved[TXG_SIZE];
/* amount of space we expect to write; == amount of dirty data */
int64_t dd_space_towrite[TXG_SIZE];
dsl_deadlist_t dd_livelist;
bplist_t dd_pending_frees;
bplist_t dd_pending_allocs;
kmutex_t dd_activity_lock;
kcondvar_t dd_activity_cv;
boolean_t dd_activity_cancelled;
uint64_t dd_activity_waiters;
/* protected by dd_lock; keep at end of struct for better locality */
char dd_myname[ZFS_MAX_DATASET_NAME_LEN];
};
static inline dsl_dir_phys_t *
dsl_dir_phys(dsl_dir_t *dd)
{
return (dd->dd_dbuf->db_data);
}
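/*
 * Editor's note: sketch of reading the per-type space breakdown through the
 * dsl_dir_phys() accessor above; valid only when DD_FLAG_USED_BREAKDOWN is
 * set in dd_flags. The function name is hypothetical.
 */
#if 0	/* illustration only */
static uint64_t
example_dir_used_by_snapshots(dsl_dir_t *dd)
{
	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);

	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN)
		return (ddp->dd_used_breakdown[DD_USED_SNAP]);
	return (0);	/* breakdown not maintained on older pools */
}
#endif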
void dsl_dir_rele(dsl_dir_t *dd, const void *tag);
void dsl_dir_async_rele(dsl_dir_t *dd, const void *tag);
int dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dir_t **, const char **tail);
int dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, const void *tag, dsl_dir_t **);
void dsl_dir_name(dsl_dir_t *dd, char *buf);
int dsl_dir_namelen(dsl_dir_t *dd);
uint64_t dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds,
const char *name, dmu_tx_t *tx);
uint64_t dsl_dir_get_used(dsl_dir_t *dd);
uint64_t dsl_dir_get_compressed(dsl_dir_t *dd);
uint64_t dsl_dir_get_quota(dsl_dir_t *dd);
uint64_t dsl_dir_get_reservation(dsl_dir_t *dd);
uint64_t dsl_dir_get_compressratio(dsl_dir_t *dd);
uint64_t dsl_dir_get_logicalused(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedsnap(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedds(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedrefreserv(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedchild(dsl_dir_t *dd);
void dsl_dir_get_origin(dsl_dir_t *dd, char *buf);
int dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count);
int dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count);
void dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv);
uint64_t dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly);
void dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx);
void dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx);
int dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t mem,
uint64_t asize, boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx);
void dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx);
void dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx);
void dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx);
void dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx);
void dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
int64_t compressed, int64_t uncompressed, int64_t tonew,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx);
int dsl_dir_set_quota(const char *ddname, zprop_source_t source,
uint64_t quota);
int dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation);
int dsl_dir_activate_fs_ss_limit(const char *);
int dsl_fs_ss_limit_check(dsl_dir_t *, uint64_t, zfs_prop_t, dsl_dir_t *,
cred_t *, proc_t *);
void dsl_fs_ss_count_adjust(dsl_dir_t *, int64_t, const char *, dmu_tx_t *);
int dsl_dir_rename(const char *oldname, const char *newname);
int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *, proc_t *);
boolean_t dsl_dir_is_clone(dsl_dir_t *dd);
void dsl_dir_new_refreservation(dsl_dir_t *dd, struct dsl_dataset *ds,
uint64_t reservation, cred_t *cr, dmu_tx_t *tx);
void dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx);
inode_timespec_t dsl_dir_snap_cmtime(dsl_dir_t *dd);
void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value,
dmu_tx_t *tx);
void dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx);
boolean_t dsl_dir_is_zapified(dsl_dir_t *dd);
void dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj);
void dsl_dir_livelist_close(dsl_dir_t *dd);
void dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total);
int dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
boolean_t *waited);
void dsl_dir_cancel_waiters(dsl_dir_t *dd);
/* internal reserved dir name */
#define MOS_DIR_NAME "$MOS"
#define ORIGIN_DIR_NAME "$ORIGIN"
#define FREE_DIR_NAME "$FREE"
#define LEAK_DIR_NAME "$LEAK"
#ifdef ZFS_DEBUG
#define dprintf_dd(dd, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); \
dsl_dir_name(dd, __ds_name); \
dprintf("dd=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, ZFS_MAX_DATASET_NAME_LEN); \
} \
} while (0)
#else
#define dprintf_dd(dd, fmt, ...)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DIR_H */
diff --git a/sys/contrib/openzfs/include/sys/fs/zfs.h b/sys/contrib/openzfs/include/sys/fs/zfs.h
index 8cbd0e6024a4..dedee0e7bd5a 100644
--- a/sys/contrib/openzfs/include/sys/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fs/zfs.h
@@ -1,1826 +1,1826 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013, 2017 Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019 Datto Inc.
* Portions Copyright 2010 Robert Milkowski
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
#ifndef _SYS_FS_ZFS_H
#define _SYS_FS_ZFS_H extern __attribute__((visibility("default")))
#include <sys/time.h>
#include <sys/zio_priority.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Types and constants shared between userland and the kernel.
*/
/*
* Each dataset can be one of the following types. These constants can be
* combined into masks that can be passed to various functions.
*/
typedef enum {
ZFS_TYPE_INVALID = 0,
ZFS_TYPE_FILESYSTEM = (1 << 0),
ZFS_TYPE_SNAPSHOT = (1 << 1),
ZFS_TYPE_VOLUME = (1 << 2),
ZFS_TYPE_POOL = (1 << 3),
ZFS_TYPE_BOOKMARK = (1 << 4),
ZFS_TYPE_VDEV = (1 << 5),
} zfs_type_t;
/*
* NB: lzc_dataset_type should be updated whenever a new objset type is added,
* if it represents a real type of a dataset that can be created from userland.
*/
typedef enum dmu_objset_type {
DMU_OST_NONE,
DMU_OST_META,
DMU_OST_ZFS,
DMU_OST_ZVOL,
DMU_OST_OTHER, /* For testing only! */
DMU_OST_ANY, /* Be careful! */
DMU_OST_NUMTYPES
} dmu_objset_type_t;
#define ZFS_TYPE_DATASET \
(ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME | ZFS_TYPE_SNAPSHOT)
/*
* All of these include the terminating NUL byte.
*/
#define ZAP_MAXNAMELEN 256
#define ZAP_MAXVALUELEN (1024 * 8)
#define ZAP_OLDMAXVALUELEN 1024
#define ZFS_MAX_DATASET_NAME_LEN 256
/*
* Dataset properties are identified by these constants and must be added to
* the end of this list to ensure that external consumers are not affected
* by the change. If you make any changes to this list, be sure to update
* the property table in module/zcommon/zfs_prop.c.
*/
typedef enum {
ZPROP_CONT = -2,
ZPROP_INVAL = -1,
ZPROP_USERPROP = ZPROP_INVAL,
ZFS_PROP_TYPE = 0,
ZFS_PROP_CREATION,
ZFS_PROP_USED,
ZFS_PROP_AVAILABLE,
ZFS_PROP_REFERENCED,
ZFS_PROP_COMPRESSRATIO,
ZFS_PROP_MOUNTED,
ZFS_PROP_ORIGIN,
ZFS_PROP_QUOTA,
ZFS_PROP_RESERVATION,
ZFS_PROP_VOLSIZE,
ZFS_PROP_VOLBLOCKSIZE,
ZFS_PROP_RECORDSIZE,
ZFS_PROP_MOUNTPOINT,
ZFS_PROP_SHARENFS,
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_ATIME,
ZFS_PROP_DEVICES,
ZFS_PROP_EXEC,
ZFS_PROP_SETUID,
ZFS_PROP_READONLY,
ZFS_PROP_ZONED,
ZFS_PROP_SNAPDIR,
ZFS_PROP_ACLMODE,
ZFS_PROP_ACLINHERIT,
ZFS_PROP_CREATETXG,
ZFS_PROP_NAME, /* not exposed to the user */
ZFS_PROP_CANMOUNT,
ZFS_PROP_ISCSIOPTIONS, /* not exposed to the user */
ZFS_PROP_XATTR,
ZFS_PROP_NUMCLONES, /* not exposed to the user */
ZFS_PROP_COPIES,
ZFS_PROP_VERSION,
ZFS_PROP_UTF8ONLY,
ZFS_PROP_NORMALIZE,
ZFS_PROP_CASE,
ZFS_PROP_VSCAN,
ZFS_PROP_NBMAND,
ZFS_PROP_SHARESMB,
ZFS_PROP_REFQUOTA,
ZFS_PROP_REFRESERVATION,
ZFS_PROP_GUID,
ZFS_PROP_PRIMARYCACHE,
ZFS_PROP_SECONDARYCACHE,
ZFS_PROP_USEDSNAP,
ZFS_PROP_USEDDS,
ZFS_PROP_USEDCHILD,
ZFS_PROP_USEDREFRESERV,
ZFS_PROP_USERACCOUNTING, /* not exposed to the user */
ZFS_PROP_STMF_SHAREINFO, /* not exposed to the user */
ZFS_PROP_DEFER_DESTROY,
ZFS_PROP_USERREFS,
ZFS_PROP_LOGBIAS,
ZFS_PROP_UNIQUE, /* not exposed to the user */
ZFS_PROP_OBJSETID,
ZFS_PROP_DEDUP,
ZFS_PROP_MLSLABEL,
ZFS_PROP_SYNC,
ZFS_PROP_DNODESIZE,
ZFS_PROP_REFRATIO,
ZFS_PROP_WRITTEN,
ZFS_PROP_CLONES,
ZFS_PROP_LOGICALUSED,
ZFS_PROP_LOGICALREFERENCED,
ZFS_PROP_INCONSISTENT, /* not exposed to the user */
ZFS_PROP_VOLMODE,
ZFS_PROP_FILESYSTEM_LIMIT,
ZFS_PROP_SNAPSHOT_LIMIT,
ZFS_PROP_FILESYSTEM_COUNT,
ZFS_PROP_SNAPSHOT_COUNT,
ZFS_PROP_SNAPDEV,
ZFS_PROP_ACLTYPE,
ZFS_PROP_SELINUX_CONTEXT,
ZFS_PROP_SELINUX_FSCONTEXT,
ZFS_PROP_SELINUX_DEFCONTEXT,
ZFS_PROP_SELINUX_ROOTCONTEXT,
ZFS_PROP_RELATIME,
ZFS_PROP_REDUNDANT_METADATA,
ZFS_PROP_OVERLAY,
ZFS_PROP_PREV_SNAP,
ZFS_PROP_RECEIVE_RESUME_TOKEN,
ZFS_PROP_ENCRYPTION,
ZFS_PROP_KEYLOCATION,
ZFS_PROP_KEYFORMAT,
ZFS_PROP_PBKDF2_SALT,
ZFS_PROP_PBKDF2_ITERS,
ZFS_PROP_ENCRYPTION_ROOT,
ZFS_PROP_KEY_GUID,
ZFS_PROP_KEYSTATUS,
ZFS_PROP_REMAPTXG, /* obsolete - no longer used */
ZFS_PROP_SPECIAL_SMALL_BLOCKS,
ZFS_PROP_IVSET_GUID, /* not exposed to the user */
ZFS_PROP_REDACTED,
ZFS_PROP_REDACT_SNAPS,
ZFS_PROP_SNAPSHOTS_CHANGED,
ZFS_NUM_PROPS
} zfs_prop_t;
typedef enum {
ZFS_PROP_USERUSED,
ZFS_PROP_USERQUOTA,
ZFS_PROP_GROUPUSED,
ZFS_PROP_GROUPQUOTA,
ZFS_PROP_USEROBJUSED,
ZFS_PROP_USEROBJQUOTA,
ZFS_PROP_GROUPOBJUSED,
ZFS_PROP_GROUPOBJQUOTA,
ZFS_PROP_PROJECTUSED,
ZFS_PROP_PROJECTQUOTA,
ZFS_PROP_PROJECTOBJUSED,
ZFS_PROP_PROJECTOBJQUOTA,
ZFS_NUM_USERQUOTA_PROPS
} zfs_userquota_prop_t;
_SYS_FS_ZFS_H const char *const zfs_userquota_prop_prefixes[
ZFS_NUM_USERQUOTA_PROPS];
/*
* Pool properties are identified by these constants and must be added to the
* end of this list to ensure that external consumers are not affected
* by the change. Properties must be registered in zfs_prop_init().
*/
typedef enum {
ZPOOL_PROP_INVAL = -1,
ZPOOL_PROP_NAME,
ZPOOL_PROP_SIZE,
ZPOOL_PROP_CAPACITY,
ZPOOL_PROP_ALTROOT,
ZPOOL_PROP_HEALTH,
ZPOOL_PROP_GUID,
ZPOOL_PROP_VERSION,
ZPOOL_PROP_BOOTFS,
ZPOOL_PROP_DELEGATION,
ZPOOL_PROP_AUTOREPLACE,
ZPOOL_PROP_CACHEFILE,
ZPOOL_PROP_FAILUREMODE,
ZPOOL_PROP_LISTSNAPS,
ZPOOL_PROP_AUTOEXPAND,
ZPOOL_PROP_DEDUPDITTO,
ZPOOL_PROP_DEDUPRATIO,
ZPOOL_PROP_FREE,
ZPOOL_PROP_ALLOCATED,
ZPOOL_PROP_READONLY,
ZPOOL_PROP_ASHIFT,
ZPOOL_PROP_COMMENT,
ZPOOL_PROP_EXPANDSZ,
ZPOOL_PROP_FREEING,
ZPOOL_PROP_FRAGMENTATION,
ZPOOL_PROP_LEAKED,
ZPOOL_PROP_MAXBLOCKSIZE,
ZPOOL_PROP_TNAME,
ZPOOL_PROP_MAXDNODESIZE,
ZPOOL_PROP_MULTIHOST,
ZPOOL_PROP_CHECKPOINT,
ZPOOL_PROP_LOAD_GUID,
ZPOOL_PROP_AUTOTRIM,
ZPOOL_PROP_COMPATIBILITY,
ZPOOL_NUM_PROPS
} zpool_prop_t;
/* Small enough to not hog a whole line of printout in zpool(8). */
#define ZPROP_MAX_COMMENT 32
#define ZPROP_BOOLEAN_NA 2
#define ZPROP_VALUE "value"
#define ZPROP_SOURCE "source"
typedef enum {
ZPROP_SRC_NONE = 0x1,
ZPROP_SRC_DEFAULT = 0x2,
ZPROP_SRC_TEMPORARY = 0x4,
ZPROP_SRC_LOCAL = 0x8,
ZPROP_SRC_INHERITED = 0x10,
ZPROP_SRC_RECEIVED = 0x20
} zprop_source_t;
#define ZPROP_SRC_ALL 0x3f
#define ZPROP_SOURCE_VAL_RECVD "$recvd"
#define ZPROP_N_MORE_ERRORS "N_MORE_ERRORS"
/*
* Dataset flag implemented as a special entry in the props zap object
* indicating that the dataset has received properties on or after
* SPA_VERSION_RECVD_PROPS. The first such receive blows away local properties
* just as it did in earlier versions, and thereafter, local properties are
* preserved.
*/
#define ZPROP_HAS_RECVD "$hasrecvd"
typedef enum {
ZPROP_ERR_NOCLEAR = 0x1, /* failure to clear existing props */
ZPROP_ERR_NORESTORE = 0x2 /* failure to restore props on error */
} zprop_errflags_t;
typedef int (*zprop_func)(int, void *);
/*
* Properties to be set on the root file system of a new pool
* are stuffed into their own nvlist, which is then included in
* the properties nvlist with the pool properties.
*/
#define ZPOOL_ROOTFS_PROPS "root-props-nvl"
/*
* Length of 'written@' and 'written#'
*/
#define ZFS_WRITTEN_PROP_PREFIX_LEN 8
/*
* VDEV properties are identified by these constants and must be added to the
* end of this list to ensure that external consumers are not affected
* by the change. If you make any changes to this list, be sure to update
* the property table in usr/src/common/zfs/zpool_prop.c.
*/
typedef enum {
VDEV_PROP_INVAL = -1,
VDEV_PROP_USERPROP = VDEV_PROP_INVAL,
VDEV_PROP_NAME,
VDEV_PROP_CAPACITY,
VDEV_PROP_STATE,
VDEV_PROP_GUID,
VDEV_PROP_ASIZE,
VDEV_PROP_PSIZE,
VDEV_PROP_ASHIFT,
VDEV_PROP_SIZE,
VDEV_PROP_FREE,
VDEV_PROP_ALLOCATED,
VDEV_PROP_COMMENT,
VDEV_PROP_EXPANDSZ,
VDEV_PROP_FRAGMENTATION,
VDEV_PROP_BOOTSIZE,
VDEV_PROP_PARITY,
VDEV_PROP_PATH,
VDEV_PROP_DEVID,
VDEV_PROP_PHYS_PATH,
VDEV_PROP_ENC_PATH,
VDEV_PROP_FRU,
VDEV_PROP_PARENT,
VDEV_PROP_CHILDREN,
VDEV_PROP_NUMCHILDREN,
VDEV_PROP_READ_ERRORS,
VDEV_PROP_WRITE_ERRORS,
VDEV_PROP_CHECKSUM_ERRORS,
VDEV_PROP_INITIALIZE_ERRORS,
VDEV_PROP_OPS_NULL,
VDEV_PROP_OPS_READ,
VDEV_PROP_OPS_WRITE,
VDEV_PROP_OPS_FREE,
VDEV_PROP_OPS_CLAIM,
VDEV_PROP_OPS_TRIM,
VDEV_PROP_BYTES_NULL,
VDEV_PROP_BYTES_READ,
VDEV_PROP_BYTES_WRITE,
VDEV_PROP_BYTES_FREE,
VDEV_PROP_BYTES_CLAIM,
VDEV_PROP_BYTES_TRIM,
VDEV_PROP_REMOVING,
VDEV_PROP_ALLOCATING,
VDEV_NUM_PROPS
} vdev_prop_t;
/*
* Dataset property functions shared between libzfs and kernel.
*/
_SYS_FS_ZFS_H const char *zfs_prop_default_string(zfs_prop_t);
_SYS_FS_ZFS_H uint64_t zfs_prop_default_numeric(zfs_prop_t);
_SYS_FS_ZFS_H boolean_t zfs_prop_readonly(zfs_prop_t);
_SYS_FS_ZFS_H boolean_t zfs_prop_visible(zfs_prop_t prop);
_SYS_FS_ZFS_H boolean_t zfs_prop_inheritable(zfs_prop_t);
_SYS_FS_ZFS_H boolean_t zfs_prop_setonce(zfs_prop_t);
_SYS_FS_ZFS_H boolean_t zfs_prop_encryption_key_param(zfs_prop_t);
_SYS_FS_ZFS_H boolean_t zfs_prop_valid_keylocation(const char *, boolean_t);
_SYS_FS_ZFS_H const char *zfs_prop_to_name(zfs_prop_t);
_SYS_FS_ZFS_H zfs_prop_t zfs_name_to_prop(const char *);
_SYS_FS_ZFS_H boolean_t zfs_prop_user(const char *);
_SYS_FS_ZFS_H boolean_t zfs_prop_userquota(const char *);
_SYS_FS_ZFS_H boolean_t zfs_prop_written(const char *);
_SYS_FS_ZFS_H int zfs_prop_index_to_string(zfs_prop_t, uint64_t, const char **);
_SYS_FS_ZFS_H int zfs_prop_string_to_index(zfs_prop_t, const char *,
uint64_t *);
_SYS_FS_ZFS_H uint64_t zfs_prop_random_value(zfs_prop_t, uint64_t seed);
_SYS_FS_ZFS_H boolean_t zfs_prop_valid_for_type(int, zfs_type_t, boolean_t);
/*
* Pool property functions shared between libzfs and kernel.
*/
_SYS_FS_ZFS_H zpool_prop_t zpool_name_to_prop(const char *);
_SYS_FS_ZFS_H const char *zpool_prop_to_name(zpool_prop_t);
_SYS_FS_ZFS_H const char *zpool_prop_default_string(zpool_prop_t);
_SYS_FS_ZFS_H uint64_t zpool_prop_default_numeric(zpool_prop_t);
_SYS_FS_ZFS_H boolean_t zpool_prop_readonly(zpool_prop_t);
_SYS_FS_ZFS_H boolean_t zpool_prop_setonce(zpool_prop_t);
_SYS_FS_ZFS_H boolean_t zpool_prop_feature(const char *);
_SYS_FS_ZFS_H boolean_t zpool_prop_unsupported(const char *);
_SYS_FS_ZFS_H int zpool_prop_index_to_string(zpool_prop_t, uint64_t,
const char **);
_SYS_FS_ZFS_H int zpool_prop_string_to_index(zpool_prop_t, const char *,
uint64_t *);
_SYS_FS_ZFS_H uint64_t zpool_prop_random_value(zpool_prop_t, uint64_t seed);
/*
* VDEV property functions shared between libzfs and kernel.
*/
_SYS_FS_ZFS_H vdev_prop_t vdev_name_to_prop(const char *);
_SYS_FS_ZFS_H boolean_t vdev_prop_user(const char *name);
_SYS_FS_ZFS_H const char *vdev_prop_to_name(vdev_prop_t);
_SYS_FS_ZFS_H const char *vdev_prop_default_string(vdev_prop_t);
_SYS_FS_ZFS_H uint64_t vdev_prop_default_numeric(vdev_prop_t);
_SYS_FS_ZFS_H boolean_t vdev_prop_readonly(vdev_prop_t prop);
_SYS_FS_ZFS_H int vdev_prop_index_to_string(vdev_prop_t, uint64_t,
const char **);
_SYS_FS_ZFS_H int vdev_prop_string_to_index(vdev_prop_t, const char *,
uint64_t *);
_SYS_FS_ZFS_H boolean_t zpool_prop_vdev(const char *name);
_SYS_FS_ZFS_H uint64_t vdev_prop_random_value(vdev_prop_t prop, uint64_t seed);
/*
* Definitions for the Delegation.
*/
typedef enum {
ZFS_DELEG_WHO_UNKNOWN = 0,
ZFS_DELEG_USER = 'u',
ZFS_DELEG_USER_SETS = 'U',
ZFS_DELEG_GROUP = 'g',
ZFS_DELEG_GROUP_SETS = 'G',
ZFS_DELEG_EVERYONE = 'e',
ZFS_DELEG_EVERYONE_SETS = 'E',
ZFS_DELEG_CREATE = 'c',
ZFS_DELEG_CREATE_SETS = 'C',
ZFS_DELEG_NAMED_SET = 's',
ZFS_DELEG_NAMED_SET_SETS = 'S'
} zfs_deleg_who_type_t;
typedef enum {
ZFS_DELEG_NONE = 0,
ZFS_DELEG_PERM_LOCAL = 1,
ZFS_DELEG_PERM_DESCENDENT = 2,
ZFS_DELEG_PERM_LOCALDESCENDENT = 3,
ZFS_DELEG_PERM_CREATE = 4
} zfs_deleg_inherit_t;
#define ZFS_DELEG_PERM_UID "uid"
#define ZFS_DELEG_PERM_GID "gid"
#define ZFS_DELEG_PERM_GROUPS "groups"
#define ZFS_MLSLABEL_DEFAULT "none"
#define ZFS_SMB_ACL_SRC "src"
#define ZFS_SMB_ACL_TARGET "target"
typedef enum {
ZFS_CANMOUNT_OFF = 0,
ZFS_CANMOUNT_ON = 1,
ZFS_CANMOUNT_NOAUTO = 2
} zfs_canmount_type_t;
typedef enum {
ZFS_LOGBIAS_LATENCY = 0,
ZFS_LOGBIAS_THROUGHPUT = 1
} zfs_logbias_op_t;
typedef enum zfs_share_op {
ZFS_SHARE_NFS = 0,
ZFS_UNSHARE_NFS = 1,
ZFS_SHARE_SMB = 2,
ZFS_UNSHARE_SMB = 3
} zfs_share_op_t;
typedef enum zfs_smb_acl_op {
ZFS_SMB_ACL_ADD,
ZFS_SMB_ACL_REMOVE,
ZFS_SMB_ACL_RENAME,
ZFS_SMB_ACL_PURGE
} zfs_smb_acl_op_t;
typedef enum zfs_cache_type {
ZFS_CACHE_NONE = 0,
ZFS_CACHE_METADATA = 1,
ZFS_CACHE_ALL = 2
} zfs_cache_type_t;
typedef enum {
ZFS_SYNC_STANDARD = 0,
ZFS_SYNC_ALWAYS = 1,
ZFS_SYNC_DISABLED = 2
} zfs_sync_type_t;
typedef enum {
ZFS_XATTR_OFF = 0,
ZFS_XATTR_DIR = 1,
ZFS_XATTR_SA = 2
} zfs_xattr_type_t;
typedef enum {
ZFS_DNSIZE_LEGACY = 0,
ZFS_DNSIZE_AUTO = 1,
ZFS_DNSIZE_1K = 1024,
ZFS_DNSIZE_2K = 2048,
ZFS_DNSIZE_4K = 4096,
ZFS_DNSIZE_8K = 8192,
ZFS_DNSIZE_16K = 16384
} zfs_dnsize_type_t;
typedef enum {
ZFS_REDUNDANT_METADATA_ALL,
ZFS_REDUNDANT_METADATA_MOST
} zfs_redundant_metadata_type_t;
typedef enum {
ZFS_VOLMODE_DEFAULT = 0,
ZFS_VOLMODE_GEOM = 1,
ZFS_VOLMODE_DEV = 2,
ZFS_VOLMODE_NONE = 3
} zfs_volmode_t;
typedef enum zfs_keystatus {
ZFS_KEYSTATUS_NONE = 0,
ZFS_KEYSTATUS_UNAVAILABLE,
ZFS_KEYSTATUS_AVAILABLE,
} zfs_keystatus_t;
typedef enum zfs_keyformat {
ZFS_KEYFORMAT_NONE = 0,
ZFS_KEYFORMAT_RAW,
ZFS_KEYFORMAT_HEX,
ZFS_KEYFORMAT_PASSPHRASE,
ZFS_KEYFORMAT_FORMATS
} zfs_keyformat_t;
typedef enum zfs_key_location {
ZFS_KEYLOCATION_NONE = 0,
ZFS_KEYLOCATION_PROMPT,
ZFS_KEYLOCATION_URI,
ZFS_KEYLOCATION_LOCATIONS
} zfs_keylocation_t;
#define DEFAULT_PBKDF2_ITERATIONS 350000
#define MIN_PBKDF2_ITERATIONS 100000
/*
* On-disk version number.
*/
#define SPA_VERSION_1 1ULL
#define SPA_VERSION_2 2ULL
#define SPA_VERSION_3 3ULL
#define SPA_VERSION_4 4ULL
#define SPA_VERSION_5 5ULL
#define SPA_VERSION_6 6ULL
#define SPA_VERSION_7 7ULL
#define SPA_VERSION_8 8ULL
#define SPA_VERSION_9 9ULL
#define SPA_VERSION_10 10ULL
#define SPA_VERSION_11 11ULL
#define SPA_VERSION_12 12ULL
#define SPA_VERSION_13 13ULL
#define SPA_VERSION_14 14ULL
#define SPA_VERSION_15 15ULL
#define SPA_VERSION_16 16ULL
#define SPA_VERSION_17 17ULL
#define SPA_VERSION_18 18ULL
#define SPA_VERSION_19 19ULL
#define SPA_VERSION_20 20ULL
#define SPA_VERSION_21 21ULL
#define SPA_VERSION_22 22ULL
#define SPA_VERSION_23 23ULL
#define SPA_VERSION_24 24ULL
#define SPA_VERSION_25 25ULL
#define SPA_VERSION_26 26ULL
#define SPA_VERSION_27 27ULL
#define SPA_VERSION_28 28ULL
#define SPA_VERSION_5000 5000ULL
/*
* The incrementing pool version number has been replaced by pool feature
* flags. For more details, see zfeature.c.
*/
#define SPA_VERSION SPA_VERSION_5000
#define SPA_VERSION_STRING "5000"
/*
* Symbolic names for the changes that caused a SPA_VERSION switch.
* Used in the code when checking for presence or absence of a feature.
* Feel free to define multiple symbolic names for each version if there
* were multiple changes to on-disk structures during that version.
*
* NOTE: When checking the current SPA_VERSION in your code, be sure
* to use spa_version() since it reports the version of the
* last synced uberblock. Checking the in-flight version can
* be dangerous in some cases.
*/
#define SPA_VERSION_INITIAL SPA_VERSION_1
#define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2
#define SPA_VERSION_SPARES SPA_VERSION_3
#define SPA_VERSION_RAIDZ2 SPA_VERSION_3
#define SPA_VERSION_BPOBJ_ACCOUNT SPA_VERSION_3
#define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3
#define SPA_VERSION_DNODE_BYTES SPA_VERSION_3
#define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4
#define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5
#define SPA_VERSION_BOOTFS SPA_VERSION_6
#define SPA_VERSION_SLOGS SPA_VERSION_7
#define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8
#define SPA_VERSION_FUID SPA_VERSION_9
#define SPA_VERSION_REFRESERVATION SPA_VERSION_9
#define SPA_VERSION_REFQUOTA SPA_VERSION_9
#define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9
#define SPA_VERSION_L2CACHE SPA_VERSION_10
#define SPA_VERSION_NEXT_CLONES SPA_VERSION_11
#define SPA_VERSION_ORIGIN SPA_VERSION_11
#define SPA_VERSION_DSL_SCRUB SPA_VERSION_11
#define SPA_VERSION_SNAP_PROPS SPA_VERSION_12
#define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13
#define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14
#define SPA_VERSION_USERSPACE SPA_VERSION_15
#define SPA_VERSION_STMF_PROP SPA_VERSION_16
#define SPA_VERSION_RAIDZ3 SPA_VERSION_17
#define SPA_VERSION_USERREFS SPA_VERSION_18
#define SPA_VERSION_HOLES SPA_VERSION_19
#define SPA_VERSION_ZLE_COMPRESSION SPA_VERSION_20
#define SPA_VERSION_DEDUP SPA_VERSION_21
#define SPA_VERSION_RECVD_PROPS SPA_VERSION_22
#define SPA_VERSION_SLIM_ZIL SPA_VERSION_23
#define SPA_VERSION_SA SPA_VERSION_24
#define SPA_VERSION_SCAN SPA_VERSION_25
#define SPA_VERSION_DIR_CLONES SPA_VERSION_26
#define SPA_VERSION_DEADLISTS SPA_VERSION_26
#define SPA_VERSION_FAST_SNAP SPA_VERSION_27
#define SPA_VERSION_MULTI_REPLACE SPA_VERSION_28
#define SPA_VERSION_BEFORE_FEATURES SPA_VERSION_28
#define SPA_VERSION_FEATURES SPA_VERSION_5000
#define SPA_VERSION_IS_SUPPORTED(v) \
(((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
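/*
 * Editor's note: sketch of the gating pattern recommended in the comment
 * above SPA_VERSION_INITIAL: check the last-synced version via
 * spa_version() rather than an in-flight value. The function name is
 * hypothetical.
 */
#if 0	/* illustration only */
static boolean_t
example_pool_has_refreservation(spa_t *spa)
{
	return (spa_version(spa) >= SPA_VERSION_REFRESERVATION);
}
#endif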
/*
* ZPL version - rev'd whenever an incompatible on-disk format change
* occurs. This is independent of SPA/DMU/ZAP versioning. You must
* also update the version_table[] and help message in zfs_prop.c.
*/
#define ZPL_VERSION_1 1ULL
#define ZPL_VERSION_2 2ULL
#define ZPL_VERSION_3 3ULL
#define ZPL_VERSION_4 4ULL
#define ZPL_VERSION_5 5ULL
#define ZPL_VERSION ZPL_VERSION_5
#define ZPL_VERSION_STRING "5"
#define ZPL_VERSION_INITIAL ZPL_VERSION_1
#define ZPL_VERSION_DIRENT_TYPE ZPL_VERSION_2
#define ZPL_VERSION_FUID ZPL_VERSION_3
#define ZPL_VERSION_NORMALIZATION ZPL_VERSION_3
#define ZPL_VERSION_SYSATTR ZPL_VERSION_3
#define ZPL_VERSION_USERSPACE ZPL_VERSION_4
#define ZPL_VERSION_SA ZPL_VERSION_5
/* Persistent L2ARC version */
#define L2ARC_PERSISTENT_VERSION_1 1ULL
#define L2ARC_PERSISTENT_VERSION L2ARC_PERSISTENT_VERSION_1
#define L2ARC_PERSISTENT_VERSION_STRING "1"
/* Rewind policy information */
#define ZPOOL_NO_REWIND 1 /* No policy - default behavior */
#define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */
#define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */
#define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */
#define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */
#define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */
#define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */
typedef struct zpool_load_policy {
uint32_t zlp_rewind; /* rewind policy requested */
uint64_t zlp_maxmeta; /* max acceptable meta-data errors */
uint64_t zlp_maxdata; /* max acceptable data errors */
uint64_t zlp_txg; /* specific txg to load */
} zpool_load_policy_t;
/*
* The following are configuration names used in the nvlist describing a pool's
* configuration. New on-disk names should be prefixed with "<reversed-DNS>:"
* (e.g. "org.openzfs:") to avoid conflicting names being developed
* independently.
*/
#define ZPOOL_CONFIG_VERSION "version"
#define ZPOOL_CONFIG_POOL_NAME "name"
#define ZPOOL_CONFIG_POOL_STATE "state"
#define ZPOOL_CONFIG_POOL_TXG "txg"
#define ZPOOL_CONFIG_POOL_GUID "pool_guid"
#define ZPOOL_CONFIG_CREATE_TXG "create_txg"
#define ZPOOL_CONFIG_TOP_GUID "top_guid"
#define ZPOOL_CONFIG_VDEV_TREE "vdev_tree"
#define ZPOOL_CONFIG_TYPE "type"
#define ZPOOL_CONFIG_CHILDREN "children"
#define ZPOOL_CONFIG_ID "id"
#define ZPOOL_CONFIG_GUID "guid"
#define ZPOOL_CONFIG_INDIRECT_OBJECT "com.delphix:indirect_object"
#define ZPOOL_CONFIG_INDIRECT_BIRTHS "com.delphix:indirect_births"
#define ZPOOL_CONFIG_PREV_INDIRECT_VDEV "com.delphix:prev_indirect_vdev"
#define ZPOOL_CONFIG_PATH "path"
#define ZPOOL_CONFIG_DEVID "devid"
#define ZPOOL_CONFIG_SPARE_ID "spareid"
#define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array"
#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT "ashift"
#define ZPOOL_CONFIG_ASIZE "asize"
#define ZPOOL_CONFIG_DTL "DTL"
#define ZPOOL_CONFIG_SCAN_STATS "scan_stats" /* not stored on disk */
#define ZPOOL_CONFIG_REMOVAL_STATS "removal_stats" /* not stored on disk */
#define ZPOOL_CONFIG_CHECKPOINT_STATS "checkpoint_stats" /* not on disk */
#define ZPOOL_CONFIG_VDEV_STATS "vdev_stats" /* not stored on disk */
#define ZPOOL_CONFIG_INDIRECT_SIZE "indirect_size" /* not stored on disk */
/* container nvlist of extended stats */
#define ZPOOL_CONFIG_VDEV_STATS_EX "vdev_stats_ex"
/* Active queue read/write stats */
#define ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE "vdev_sync_r_active_queue"
#define ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE "vdev_sync_w_active_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE "vdev_async_r_active_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE "vdev_async_w_active_queue"
#define ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE "vdev_async_scrub_active_queue"
#define ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE "vdev_async_trim_active_queue"
#define ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE "vdev_rebuild_active_queue"
/* Queue sizes */
#define ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE "vdev_sync_r_pend_queue"
#define ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE "vdev_sync_w_pend_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE "vdev_async_r_pend_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE "vdev_async_w_pend_queue"
#define ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE "vdev_async_scrub_pend_queue"
#define ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE "vdev_async_trim_pend_queue"
#define ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE "vdev_rebuild_pend_queue"
/* Latency read/write histogram stats */
#define ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO "vdev_tot_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO "vdev_tot_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO "vdev_disk_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO "vdev_disk_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO "vdev_sync_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO "vdev_sync_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO "vdev_async_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO "vdev_async_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO "vdev_scrub_histo"
#define ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO "vdev_trim_histo"
#define ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO "vdev_rebuild_histo"
/* Request size histograms */
#define ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO "vdev_sync_ind_r_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO "vdev_sync_ind_w_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO "vdev_async_ind_r_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO "vdev_async_ind_w_histo"
#define ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO "vdev_ind_scrub_histo"
#define ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO "vdev_ind_trim_histo"
#define ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO "vdev_ind_rebuild_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO "vdev_sync_agg_r_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO "vdev_sync_agg_w_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO "vdev_async_agg_r_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO "vdev_async_agg_w_histo"
#define ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO "vdev_agg_scrub_histo"
#define ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO "vdev_agg_trim_histo"
#define ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO "vdev_agg_rebuild_histo"
/* Number of slow IOs */
#define ZPOOL_CONFIG_VDEV_SLOW_IOS "vdev_slow_ios"
/* vdev enclosure sysfs path */
#define ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH "vdev_enc_sysfs_path"
#define ZPOOL_CONFIG_WHOLE_DISK "whole_disk"
#define ZPOOL_CONFIG_ERRCOUNT "error_count"
#define ZPOOL_CONFIG_NOT_PRESENT "not_present"
#define ZPOOL_CONFIG_SPARES "spares"
#define ZPOOL_CONFIG_IS_SPARE "is_spare"
#define ZPOOL_CONFIG_NPARITY "nparity"
#define ZPOOL_CONFIG_HOSTID "hostid"
#define ZPOOL_CONFIG_HOSTNAME "hostname"
#define ZPOOL_CONFIG_LOADED_TIME "initial_load_time"
#define ZPOOL_CONFIG_UNSPARE "unspare"
#define ZPOOL_CONFIG_PHYS_PATH "phys_path"
#define ZPOOL_CONFIG_IS_LOG "is_log"
#define ZPOOL_CONFIG_L2CACHE "l2cache"
#define ZPOOL_CONFIG_HOLE_ARRAY "hole_array"
#define ZPOOL_CONFIG_VDEV_CHILDREN "vdev_children"
#define ZPOOL_CONFIG_IS_HOLE "is_hole"
#define ZPOOL_CONFIG_DDT_HISTOGRAM "ddt_histogram"
#define ZPOOL_CONFIG_DDT_OBJ_STATS "ddt_object_stats"
#define ZPOOL_CONFIG_DDT_STATS "ddt_stats"
#define ZPOOL_CONFIG_SPLIT "splitcfg"
#define ZPOOL_CONFIG_ORIG_GUID "orig_guid"
#define ZPOOL_CONFIG_SPLIT_GUID "split_guid"
#define ZPOOL_CONFIG_SPLIT_LIST "guid_list"
#define ZPOOL_CONFIG_NONALLOCATING "non_allocating"
#define ZPOOL_CONFIG_REMOVING "removing"
#define ZPOOL_CONFIG_RESILVER_TXG "resilver_txg"
#define ZPOOL_CONFIG_REBUILD_TXG "rebuild_txg"
#define ZPOOL_CONFIG_COMMENT "comment"
#define ZPOOL_CONFIG_SUSPENDED "suspended" /* not stored on disk */
#define ZPOOL_CONFIG_SUSPENDED_REASON "suspended_reason" /* not stored */
#define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */
#define ZPOOL_CONFIG_BOOTFS "bootfs" /* not stored on disk */
#define ZPOOL_CONFIG_MISSING_DEVICES "missing_vdevs" /* not stored on disk */
#define ZPOOL_CONFIG_LOAD_INFO "load_info" /* not stored on disk */
#define ZPOOL_CONFIG_REWIND_INFO "rewind_info" /* not stored on disk */
#define ZPOOL_CONFIG_UNSUP_FEAT "unsup_feat" /* not stored on disk */
#define ZPOOL_CONFIG_ENABLED_FEAT "enabled_feat" /* not stored on disk */
#define ZPOOL_CONFIG_CAN_RDONLY "can_rdonly" /* not stored on disk */
#define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read"
#define ZPOOL_CONFIG_FEATURE_STATS "feature_stats" /* not stored on disk */
#define ZPOOL_CONFIG_ERRATA "errata" /* not stored on disk */
#define ZPOOL_CONFIG_VDEV_TOP_ZAP "com.delphix:vdev_zap_top"
#define ZPOOL_CONFIG_VDEV_LEAF_ZAP "com.delphix:vdev_zap_leaf"
#define ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS "com.delphix:has_per_vdev_zaps"
#define ZPOOL_CONFIG_RESILVER_DEFER "com.datto:resilver_defer"
#define ZPOOL_CONFIG_CACHEFILE "cachefile" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_STATE "mmp_state" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_TXG "mmp_txg" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_SEQ "mmp_seq" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_HOSTNAME "mmp_hostname" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_HOSTID "mmp_hostid" /* not stored on disk */
#define ZPOOL_CONFIG_ALLOCATION_BIAS "alloc_bias" /* not stored on disk */
#define ZPOOL_CONFIG_EXPANSION_TIME "expansion_time" /* not stored */
#define ZPOOL_CONFIG_REBUILD_STATS "org.openzfs:rebuild_stats"
#define ZPOOL_CONFIG_COMPATIBILITY "compatibility"
/*
* The persistent vdev state is stored as separate values rather than a single
* 'vdev_state' entry. This is because a device can be in multiple states, such
* as offline and degraded.
*/
#define ZPOOL_CONFIG_OFFLINE "offline"
#define ZPOOL_CONFIG_FAULTED "faulted"
#define ZPOOL_CONFIG_DEGRADED "degraded"
#define ZPOOL_CONFIG_REMOVED "removed"
#define ZPOOL_CONFIG_FRU "fru"
#define ZPOOL_CONFIG_AUX_STATE "aux_state"
/* Pool load policy parameters */
#define ZPOOL_LOAD_POLICY "load-policy"
#define ZPOOL_LOAD_REWIND_POLICY "load-rewind-policy"
#define ZPOOL_LOAD_REQUEST_TXG "load-request-txg"
#define ZPOOL_LOAD_META_THRESH "load-meta-thresh"
#define ZPOOL_LOAD_DATA_THRESH "load-data-thresh"
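/*
 * Editor's note: hedged sketch of building the load-policy nvlist consumed
 * at import time, using the ZPOOL_LOAD_* keys defined above and the rewind
 * flags defined earlier; which entries a caller supplies is up to that
 * caller.
 */
#if 0	/* illustration only */
static nvlist_t *
example_load_policy(void)
{
	nvlist_t *policy = fnvlist_alloc();

	fnvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, ZPOOL_NO_REWIND);
	return (policy);
}
#endif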
/* Rewind data discovered */
#define ZPOOL_CONFIG_LOAD_TIME "rewind_txg_ts"
#define ZPOOL_CONFIG_LOAD_META_ERRORS "verify_meta_errors"
#define ZPOOL_CONFIG_LOAD_DATA_ERRORS "verify_data_errors"
#define ZPOOL_CONFIG_REWIND_TIME "seconds_of_rewind"
/* dRAID configuration */
#define ZPOOL_CONFIG_DRAID_NDATA "draid_ndata"
#define ZPOOL_CONFIG_DRAID_NSPARES "draid_nspares"
#define ZPOOL_CONFIG_DRAID_NGROUPS "draid_ngroups"
#define VDEV_TYPE_ROOT "root"
#define VDEV_TYPE_MIRROR "mirror"
#define VDEV_TYPE_REPLACING "replacing"
#define VDEV_TYPE_RAIDZ "raidz"
#define VDEV_TYPE_DRAID "draid"
#define VDEV_TYPE_DRAID_SPARE "dspare"
#define VDEV_TYPE_DISK "disk"
#define VDEV_TYPE_FILE "file"
#define VDEV_TYPE_MISSING "missing"
#define VDEV_TYPE_HOLE "hole"
#define VDEV_TYPE_SPARE "spare"
#define VDEV_TYPE_LOG "log"
#define VDEV_TYPE_L2CACHE "l2cache"
#define VDEV_TYPE_INDIRECT "indirect"
#define VDEV_RAIDZ_MAXPARITY 3
#define VDEV_DRAID_MAXPARITY 3
#define VDEV_DRAID_MIN_CHILDREN 2
#define VDEV_DRAID_MAX_CHILDREN UINT8_MAX
/* VDEV_TOP_ZAP_* are used in top-level vdev ZAP objects. */
#define VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM \
"com.delphix:indirect_obsolete_sm"
#define VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE \
"com.delphix:obsolete_counts_are_precise"
#define VDEV_TOP_ZAP_POOL_CHECKPOINT_SM \
"com.delphix:pool_checkpoint_sm"
#define VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS \
"com.delphix:ms_unflushed_phys_txgs"
#define VDEV_TOP_ZAP_VDEV_REBUILD_PHYS \
"org.openzfs:vdev_rebuild"
#define VDEV_TOP_ZAP_ALLOCATION_BIAS \
"org.zfsonlinux:allocation_bias"
/* vdev metaslab allocation bias */
#define VDEV_ALLOC_BIAS_LOG "log"
#define VDEV_ALLOC_BIAS_SPECIAL "special"
#define VDEV_ALLOC_BIAS_DEDUP "dedup"
/* vdev initialize state */
#define VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET \
"com.delphix:next_offset_to_initialize"
#define VDEV_LEAF_ZAP_INITIALIZE_STATE \
"com.delphix:vdev_initialize_state"
#define VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME \
"com.delphix:vdev_initialize_action_time"
/* vdev TRIM state */
#define VDEV_LEAF_ZAP_TRIM_LAST_OFFSET \
"org.zfsonlinux:next_offset_to_trim"
#define VDEV_LEAF_ZAP_TRIM_STATE \
"org.zfsonlinux:vdev_trim_state"
#define VDEV_LEAF_ZAP_TRIM_ACTION_TIME \
"org.zfsonlinux:vdev_trim_action_time"
#define VDEV_LEAF_ZAP_TRIM_RATE \
"org.zfsonlinux:vdev_trim_rate"
#define VDEV_LEAF_ZAP_TRIM_PARTIAL \
"org.zfsonlinux:vdev_trim_partial"
#define VDEV_LEAF_ZAP_TRIM_SECURE \
"org.zfsonlinux:vdev_trim_secure"
/*
* This is needed in userland to report the minimum necessary device size.
*/
#define SPA_MINDEVSIZE (64ULL << 20)
/*
* Set if the fragmentation has not yet been calculated. This can happen
* because the space maps have not been upgraded or the histogram feature
* is not enabled.
*/
#define ZFS_FRAG_INVALID UINT64_MAX
/*
* The location of the pool configuration repository, shared between kernel and
* userland.
*/
#define ZPOOL_CACHE_BOOT "/boot/zfs/zpool.cache"
#define ZPOOL_CACHE "/etc/zfs/zpool.cache"
/*
* Settings for zpool compatibility features files
*/
#define ZPOOL_SYSCONF_COMPAT_D SYSCONFDIR "/zfs/compatibility.d"
#define ZPOOL_DATA_COMPAT_D PKGDATADIR "/compatibility.d"
#define ZPOOL_COMPAT_MAXSIZE 16384
/*
* Hard-wired compatibility settings
*/
#define ZPOOL_COMPAT_LEGACY "legacy"
#define ZPOOL_COMPAT_OFF "off"
/*
* vdev states are ordered from least to most healthy.
* A vdev that's CANT_OPEN or below is considered unusable.
*/
typedef enum vdev_state {
VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */
VDEV_STATE_CLOSED, /* Not currently open */
VDEV_STATE_OFFLINE, /* Not allowed to open */
VDEV_STATE_REMOVED, /* Explicitly removed from system */
VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */
VDEV_STATE_FAULTED, /* External request to fault device */
VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */
VDEV_STATE_HEALTHY /* Presumed good */
} vdev_state_t;
#define VDEV_STATE_ONLINE VDEV_STATE_HEALTHY
/*
* vdev aux states. When a vdev is in the CANT_OPEN state, the aux field
* of the vdev stats structure uses these constants to distinguish why.
*/
typedef enum vdev_aux {
VDEV_AUX_NONE, /* no error */
VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */
VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */
VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */
VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */
VDEV_AUX_TOO_SMALL, /* vdev size is too small */
VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */
VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */
VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */
VDEV_AUX_UNSUP_FEAT, /* unsupported features */
VDEV_AUX_SPARED, /* hot spare used in another pool */
VDEV_AUX_ERR_EXCEEDED, /* too many errors */
VDEV_AUX_IO_FAILURE, /* experienced I/O failure */
VDEV_AUX_BAD_LOG, /* cannot read log chain(s) */
VDEV_AUX_EXTERNAL, /* external diagnosis or forced fault */
VDEV_AUX_SPLIT_POOL, /* vdev was split off into another pool */
VDEV_AUX_BAD_ASHIFT, /* vdev ashift is invalid */
VDEV_AUX_EXTERNAL_PERSIST, /* persistent forced fault */
VDEV_AUX_ACTIVE, /* vdev active on a different host */
VDEV_AUX_CHILDREN_OFFLINE, /* all children are offline */
VDEV_AUX_ASHIFT_TOO_BIG, /* vdev's min block size is too large */
} vdev_aux_t;
/*
* pool state. The following states are written to disk as part of the normal
* SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE, L2CACHE. The remaining
* states are software abstractions used at various levels to communicate
* pool state.
*/
typedef enum pool_state {
POOL_STATE_ACTIVE = 0, /* In active use */
POOL_STATE_EXPORTED, /* Explicitly exported */
POOL_STATE_DESTROYED, /* Explicitly destroyed */
POOL_STATE_SPARE, /* Reserved for hot spare use */
POOL_STATE_L2CACHE, /* Level 2 ARC device */
POOL_STATE_UNINITIALIZED, /* Internal spa_t state */
POOL_STATE_UNAVAIL, /* Internal libzfs state */
POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */
} pool_state_t;
/*
* mmp state. The following states provide additional detail describing
* why a pool couldn't be safely imported.
*/
typedef enum mmp_state {
MMP_STATE_ACTIVE = 0, /* In active use */
MMP_STATE_INACTIVE, /* Inactive and safe to import */
MMP_STATE_NO_HOSTID /* System hostid is not set */
} mmp_state_t;
/*
* Scan Functions.
*/
typedef enum pool_scan_func {
POOL_SCAN_NONE,
POOL_SCAN_SCRUB,
POOL_SCAN_RESILVER,
POOL_SCAN_FUNCS
} pool_scan_func_t;
/*
* Used to control scrub pause and resume.
*/
typedef enum pool_scrub_cmd {
POOL_SCRUB_NORMAL = 0,
POOL_SCRUB_PAUSE,
POOL_SCRUB_FLAGS_END
} pool_scrub_cmd_t;
typedef enum {
CS_NONE,
CS_CHECKPOINT_EXISTS,
CS_CHECKPOINT_DISCARDING,
CS_NUM_STATES
} checkpoint_state_t;
typedef struct pool_checkpoint_stat {
uint64_t pcs_state; /* checkpoint_state_t */
uint64_t pcs_start_time; /* time checkpoint/discard started */
uint64_t pcs_space; /* checkpointed space */
} pool_checkpoint_stat_t;
/*
* ZIO types. Needed to interpret vdev statistics below.
*/
typedef enum zio_type {
ZIO_TYPE_NULL = 0,
ZIO_TYPE_READ,
ZIO_TYPE_WRITE,
ZIO_TYPE_FREE,
ZIO_TYPE_CLAIM,
ZIO_TYPE_IOCTL,
ZIO_TYPE_TRIM,
ZIO_TYPES
} zio_type_t;
/*
* Pool statistics. Note: all fields should be 64-bit because this
* is passed between kernel and userland as an nvlist uint64 array.
*/
typedef struct pool_scan_stat {
/* values stored on disk */
uint64_t pss_func; /* pool_scan_func_t */
uint64_t pss_state; /* dsl_scan_state_t */
uint64_t pss_start_time; /* scan start time */
uint64_t pss_end_time; /* scan end time */
uint64_t pss_to_examine; /* total bytes to scan */
uint64_t pss_examined; /* total bytes located by scanner */
uint64_t pss_to_process; /* total bytes to process */
uint64_t pss_processed; /* total processed bytes */
uint64_t pss_errors; /* scan errors */
/* values not stored on disk */
uint64_t pss_pass_exam; /* examined bytes per scan pass */
uint64_t pss_pass_start; /* start time of a scan pass */
uint64_t pss_pass_scrub_pause; /* pause time of a scrub pass */
/* cumulative time scrub spent paused, needed for rate calculation */
uint64_t pss_pass_scrub_spent_paused;
uint64_t pss_pass_issued; /* issued bytes per scan pass */
uint64_t pss_issued; /* total bytes checked by scanner */
} pool_scan_stat_t;
typedef struct pool_removal_stat {
uint64_t prs_state; /* dsl_scan_state_t */
uint64_t prs_removing_vdev;
uint64_t prs_start_time;
uint64_t prs_end_time;
uint64_t prs_to_copy; /* bytes that need to be copied */
uint64_t prs_copied; /* bytes copied so far */
/*
* bytes of memory used for indirect mappings.
* This includes all removed vdevs.
*/
uint64_t prs_mapping_memory;
} pool_removal_stat_t;
typedef enum dsl_scan_state {
DSS_NONE,
DSS_SCANNING,
DSS_FINISHED,
DSS_CANCELED,
DSS_NUM_STATES
} dsl_scan_state_t;
typedef struct vdev_rebuild_stat {
uint64_t vrs_state; /* vdev_rebuild_state_t */
uint64_t vrs_start_time; /* time_t */
uint64_t vrs_end_time; /* time_t */
uint64_t vrs_scan_time_ms; /* total run time (millisecs) */
uint64_t vrs_bytes_scanned; /* allocated bytes scanned */
uint64_t vrs_bytes_issued; /* read bytes issued */
uint64_t vrs_bytes_rebuilt; /* rebuilt bytes */
uint64_t vrs_bytes_est; /* total bytes to scan */
uint64_t vrs_errors; /* scanning errors */
uint64_t vrs_pass_time_ms; /* pass run time (millisecs) */
uint64_t vrs_pass_bytes_scanned; /* bytes scanned since start/resume */
uint64_t vrs_pass_bytes_issued; /* bytes rebuilt since start/resume */
} vdev_rebuild_stat_t;
/*
* Errata described by https://openzfs.github.io/openzfs-docs/msg/ZFS-8000-ER.
* The ordering of this enum must be maintained to ensure the errata identifiers
* map to the correct documentation. New errata may only be appended to the
* list and must contain corresponding documentation at the above link.
*/
typedef enum zpool_errata {
ZPOOL_ERRATA_NONE,
ZPOOL_ERRATA_ZOL_2094_SCRUB,
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY,
ZPOOL_ERRATA_ZOL_6845_ENCRYPTION,
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION,
} zpool_errata_t;
/*
* Vdev statistics. Note: all fields should be 64-bit because this
* is passed between kernel and userland as an nvlist uint64 array.
*
* The vs_ops[] and vs_bytes[] arrays must always have an array size of 6 in
* order to keep subsequent members at their known fixed offsets. When
* adding a new field it must be added to the end of the structure.
*/
#define VS_ZIO_TYPES 6
typedef struct vdev_stat {
hrtime_t vs_timestamp; /* time since vdev load */
uint64_t vs_state; /* vdev state */
uint64_t vs_aux; /* see vdev_aux_t */
uint64_t vs_alloc; /* space allocated */
uint64_t vs_space; /* total capacity */
uint64_t vs_dspace; /* deflated capacity */
uint64_t vs_rsize; /* replaceable dev size */
uint64_t vs_esize; /* expandable dev size */
uint64_t vs_ops[VS_ZIO_TYPES]; /* operation count */
uint64_t vs_bytes[VS_ZIO_TYPES]; /* bytes read/written */
uint64_t vs_read_errors; /* read errors */
uint64_t vs_write_errors; /* write errors */
uint64_t vs_checksum_errors; /* checksum errors */
uint64_t vs_initialize_errors; /* initializing errors */
uint64_t vs_self_healed; /* self-healed bytes */
uint64_t vs_scan_removing; /* removing? */
uint64_t vs_scan_processed; /* scan processed bytes */
uint64_t vs_fragmentation; /* device fragmentation */
uint64_t vs_initialize_bytes_done; /* bytes initialized */
uint64_t vs_initialize_bytes_est; /* total bytes to initialize */
uint64_t vs_initialize_state; /* vdev_initializing_state_t */
uint64_t vs_initialize_action_time; /* time_t */
uint64_t vs_checkpoint_space; /* checkpoint-consumed space */
uint64_t vs_resilver_deferred; /* resilver deferred */
uint64_t vs_slow_ios; /* slow IOs */
uint64_t vs_trim_errors; /* trimming errors */
uint64_t vs_trim_notsup; /* TRIM not supported by device */
uint64_t vs_trim_bytes_done; /* bytes trimmed */
uint64_t vs_trim_bytes_est; /* total bytes to trim */
uint64_t vs_trim_state; /* vdev_trim_state_t */
uint64_t vs_trim_action_time; /* time_t */
uint64_t vs_rebuild_processed; /* bytes rebuilt */
uint64_t vs_configured_ashift; /* TLV (top-level vdev) vdev_ashift */
uint64_t vs_logical_ashift; /* vdev_logical_ashift */
uint64_t vs_physical_ashift; /* vdev_physical_ashift */
uint64_t vs_noalloc; /* allocations halted? */
uint64_t vs_pspace; /* physical capacity */
} vdev_stat_t;
#define VDEV_STAT_VALID(field, uint64_t_field_count) \
((uint64_t_field_count * sizeof (uint64_t)) >= \
(offsetof(vdev_stat_t, field) + sizeof (((vdev_stat_t *)NULL)->field)))
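/*
 * Illustrative sketch, not part of the original header: a userland
 * consumer that receives the stats as a uint64 array can use
 * VDEV_STAT_VALID() to check whether the producing kernel is new enough
 * to have filled in a trailing field before reading it. The helper name
 * is an assumption for the example.
 */
static inline boolean_t
example_has_fragmentation(const uint64_t *array, uint_t count)
{
	/* Only trust vs_fragmentation if the array actually covers it. */
	if (!VDEV_STAT_VALID(vs_fragmentation, count))
		return (B_FALSE);
	return (((const vdev_stat_t *)array)->vs_fragmentation !=
	    ZFS_FRAG_INVALID);
}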
/*
* Extended stats
*
* These are stats which aren't included in the original iostat output. For
* convenience, they are grouped together in vdev_stat_ex, although each stat
* is individually exported as an nvlist.
*/
typedef struct vdev_stat_ex {
/* Number of ZIOs issued to disk and waiting to finish */
uint64_t vsx_active_queue[ZIO_PRIORITY_NUM_QUEUEABLE];
/* Number of ZIOs pending to be issued to disk */
uint64_t vsx_pend_queue[ZIO_PRIORITY_NUM_QUEUEABLE];
/*
* Below are the histograms for various latencies. Buckets are in
* units of nanoseconds.
*/
/*
* 2^37 nanoseconds = ~137s. Timeouts will probably start kicking in
* before this.
*/
#define VDEV_L_HISTO_BUCKETS 37 /* Latency histo buckets */
#define VDEV_RQ_HISTO_BUCKETS 25 /* Request size histo buckets */
/* Amount of time in ZIO queue (ns) */
uint64_t vsx_queue_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
[VDEV_L_HISTO_BUCKETS];
/* Total ZIO latency (ns). Includes queuing and disk access time */
uint64_t vsx_total_histo[ZIO_TYPES][VDEV_L_HISTO_BUCKETS];
/* Amount of time to read/write the disk (ns) */
uint64_t vsx_disk_histo[ZIO_TYPES][VDEV_L_HISTO_BUCKETS];
/* "lookup the bucket for a value" histogram macros */
#define HISTO(val, buckets) (val != 0 ? MIN(highbit64(val) - 1, \
buckets - 1) : 0)
#define L_HISTO(a) HISTO(a, VDEV_L_HISTO_BUCKETS)
#define RQ_HISTO(a) HISTO(a, VDEV_RQ_HISTO_BUCKETS)
/* Physical IO histogram */
uint64_t vsx_ind_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
[VDEV_RQ_HISTO_BUCKETS];
/* Delegated (aggregated) physical IO histogram */
uint64_t vsx_agg_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
[VDEV_RQ_HISTO_BUCKETS];
} vdev_stat_ex_t;
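/*
 * Illustrative sketch, not part of the original header: how one latency
 * sample would be charged to the histograms above. L_HISTO() maps a
 * value to floor(log2(value)), clamped to the last bucket, so a 1.5 ms
 * queue wait (1,500,000 ns) lands in bucket 20 because
 * 2^20 <= 1,500,000 < 2^21. The function name and parameters are
 * assumptions for the example.
 */
static inline void
example_account_queue_latency(vdev_stat_ex_t *vsx, zio_priority_t pri,
    hrtime_t queued_ns)
{
	vsx->vsx_queue_histo[pri][L_HISTO(queued_ns)]++;
}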
/*
* Initialize functions.
*/
typedef enum pool_initialize_func {
POOL_INITIALIZE_START,
POOL_INITIALIZE_CANCEL,
POOL_INITIALIZE_SUSPEND,
POOL_INITIALIZE_FUNCS
} pool_initialize_func_t;
/*
* TRIM functions.
*/
typedef enum pool_trim_func {
POOL_TRIM_START,
POOL_TRIM_CANCEL,
POOL_TRIM_SUSPEND,
POOL_TRIM_FUNCS
} pool_trim_func_t;
/*
* DDT statistics. Note: all fields should be 64-bit because this
* is passed between kernel and userland as an nvlist uint64 array.
*/
typedef struct ddt_object {
uint64_t ddo_count; /* number of elements in ddt */
uint64_t ddo_dspace; /* size of ddt on disk */
uint64_t ddo_mspace; /* size of ddt in-core */
} ddt_object_t;
typedef struct ddt_stat {
uint64_t dds_blocks; /* blocks */
uint64_t dds_lsize; /* logical size */
uint64_t dds_psize; /* physical size */
uint64_t dds_dsize; /* deflated allocated size */
uint64_t dds_ref_blocks; /* referenced blocks */
uint64_t dds_ref_lsize; /* referenced lsize * refcnt */
uint64_t dds_ref_psize; /* referenced psize * refcnt */
uint64_t dds_ref_dsize; /* referenced dsize * refcnt */
} ddt_stat_t;
typedef struct ddt_histogram {
ddt_stat_t ddh_stat[64]; /* power-of-two histogram buckets */
} ddt_histogram_t;
#define ZVOL_DRIVER "zvol"
#define ZFS_DRIVER "zfs"
#define ZFS_DEV "/dev/zfs"
#define ZFS_DEVDIR "/dev"
#define ZFS_SUPER_MAGIC 0x2fc12fc1
/* general zvol path */
#define ZVOL_DIR "/dev/zvol/"
#define ZVOL_MAJOR 230
#define ZVOL_MINOR_BITS 4
#define ZVOL_MINOR_MASK ((1U << ZVOL_MINOR_BITS) - 1)
#define ZVOL_MINORS (1 << ZVOL_MINOR_BITS)
#define ZVOL_DEV_NAME "zd"
#define ZVOL_PROP_NAME "name"
#define ZVOL_DEFAULT_BLOCKSIZE 16384
typedef enum {
VDEV_INITIALIZE_NONE,
VDEV_INITIALIZE_ACTIVE,
VDEV_INITIALIZE_CANCELED,
VDEV_INITIALIZE_SUSPENDED,
VDEV_INITIALIZE_COMPLETE
} vdev_initializing_state_t;
typedef enum {
VDEV_TRIM_NONE,
VDEV_TRIM_ACTIVE,
VDEV_TRIM_CANCELED,
VDEV_TRIM_SUSPENDED,
VDEV_TRIM_COMPLETE,
} vdev_trim_state_t;
typedef enum {
VDEV_REBUILD_NONE,
VDEV_REBUILD_ACTIVE,
VDEV_REBUILD_CANCELED,
VDEV_REBUILD_COMPLETE,
} vdev_rebuild_state_t;
/*
* nvlist name constants. Facilitate restricting snapshot iteration range for
* the "list next snapshot" ioctl
*/
#define SNAP_ITER_MIN_TXG "snap_iter_min_txg"
#define SNAP_ITER_MAX_TXG "snap_iter_max_txg"
/*
* /dev/zfs ioctl numbers.
*
* These numbers cannot change over time. New ioctl numbers must be appended.
*/
typedef enum zfs_ioc {
/*
* Core features - 81/128 numbers reserved.
*/
#ifdef __FreeBSD__
ZFS_IOC_FIRST = 0,
#else
ZFS_IOC_FIRST = ('Z' << 8),
#endif
ZFS_IOC = ZFS_IOC_FIRST,
ZFS_IOC_POOL_CREATE = ZFS_IOC_FIRST, /* 0x5a00 */
ZFS_IOC_POOL_DESTROY, /* 0x5a01 */
ZFS_IOC_POOL_IMPORT, /* 0x5a02 */
ZFS_IOC_POOL_EXPORT, /* 0x5a03 */
ZFS_IOC_POOL_CONFIGS, /* 0x5a04 */
ZFS_IOC_POOL_STATS, /* 0x5a05 */
ZFS_IOC_POOL_TRYIMPORT, /* 0x5a06 */
ZFS_IOC_POOL_SCAN, /* 0x5a07 */
ZFS_IOC_POOL_FREEZE, /* 0x5a08 */
ZFS_IOC_POOL_UPGRADE, /* 0x5a09 */
ZFS_IOC_POOL_GET_HISTORY, /* 0x5a0a */
ZFS_IOC_VDEV_ADD, /* 0x5a0b */
ZFS_IOC_VDEV_REMOVE, /* 0x5a0c */
ZFS_IOC_VDEV_SET_STATE, /* 0x5a0d */
ZFS_IOC_VDEV_ATTACH, /* 0x5a0e */
ZFS_IOC_VDEV_DETACH, /* 0x5a0f */
ZFS_IOC_VDEV_SETPATH, /* 0x5a10 */
ZFS_IOC_VDEV_SETFRU, /* 0x5a11 */
ZFS_IOC_OBJSET_STATS, /* 0x5a12 */
ZFS_IOC_OBJSET_ZPLPROPS, /* 0x5a13 */
ZFS_IOC_DATASET_LIST_NEXT, /* 0x5a14 */
ZFS_IOC_SNAPSHOT_LIST_NEXT, /* 0x5a15 */
ZFS_IOC_SET_PROP, /* 0x5a16 */
ZFS_IOC_CREATE, /* 0x5a17 */
ZFS_IOC_DESTROY, /* 0x5a18 */
ZFS_IOC_ROLLBACK, /* 0x5a19 */
ZFS_IOC_RENAME, /* 0x5a1a */
ZFS_IOC_RECV, /* 0x5a1b */
ZFS_IOC_SEND, /* 0x5a1c */
ZFS_IOC_INJECT_FAULT, /* 0x5a1d */
ZFS_IOC_CLEAR_FAULT, /* 0x5a1e */
ZFS_IOC_INJECT_LIST_NEXT, /* 0x5a1f */
ZFS_IOC_ERROR_LOG, /* 0x5a20 */
ZFS_IOC_CLEAR, /* 0x5a21 */
ZFS_IOC_PROMOTE, /* 0x5a22 */
ZFS_IOC_SNAPSHOT, /* 0x5a23 */
ZFS_IOC_DSOBJ_TO_DSNAME, /* 0x5a24 */
ZFS_IOC_OBJ_TO_PATH, /* 0x5a25 */
ZFS_IOC_POOL_SET_PROPS, /* 0x5a26 */
ZFS_IOC_POOL_GET_PROPS, /* 0x5a27 */
ZFS_IOC_SET_FSACL, /* 0x5a28 */
ZFS_IOC_GET_FSACL, /* 0x5a29 */
ZFS_IOC_SHARE, /* 0x5a2a */
ZFS_IOC_INHERIT_PROP, /* 0x5a2b */
ZFS_IOC_SMB_ACL, /* 0x5a2c */
ZFS_IOC_USERSPACE_ONE, /* 0x5a2d */
ZFS_IOC_USERSPACE_MANY, /* 0x5a2e */
ZFS_IOC_USERSPACE_UPGRADE, /* 0x5a2f */
ZFS_IOC_HOLD, /* 0x5a30 */
ZFS_IOC_RELEASE, /* 0x5a31 */
ZFS_IOC_GET_HOLDS, /* 0x5a32 */
ZFS_IOC_OBJSET_RECVD_PROPS, /* 0x5a33 */
ZFS_IOC_VDEV_SPLIT, /* 0x5a34 */
ZFS_IOC_NEXT_OBJ, /* 0x5a35 */
ZFS_IOC_DIFF, /* 0x5a36 */
ZFS_IOC_TMP_SNAPSHOT, /* 0x5a37 */
ZFS_IOC_OBJ_TO_STATS, /* 0x5a38 */
ZFS_IOC_SPACE_WRITTEN, /* 0x5a39 */
ZFS_IOC_SPACE_SNAPS, /* 0x5a3a */
ZFS_IOC_DESTROY_SNAPS, /* 0x5a3b */
ZFS_IOC_POOL_REGUID, /* 0x5a3c */
ZFS_IOC_POOL_REOPEN, /* 0x5a3d */
ZFS_IOC_SEND_PROGRESS, /* 0x5a3e */
ZFS_IOC_LOG_HISTORY, /* 0x5a3f */
ZFS_IOC_SEND_NEW, /* 0x5a40 */
ZFS_IOC_SEND_SPACE, /* 0x5a41 */
ZFS_IOC_CLONE, /* 0x5a42 */
ZFS_IOC_BOOKMARK, /* 0x5a43 */
ZFS_IOC_GET_BOOKMARKS, /* 0x5a44 */
ZFS_IOC_DESTROY_BOOKMARKS, /* 0x5a45 */
ZFS_IOC_RECV_NEW, /* 0x5a46 */
ZFS_IOC_POOL_SYNC, /* 0x5a47 */
ZFS_IOC_CHANNEL_PROGRAM, /* 0x5a48 */
ZFS_IOC_LOAD_KEY, /* 0x5a49 */
ZFS_IOC_UNLOAD_KEY, /* 0x5a4a */
ZFS_IOC_CHANGE_KEY, /* 0x5a4b */
ZFS_IOC_REMAP, /* 0x5a4c */
ZFS_IOC_POOL_CHECKPOINT, /* 0x5a4d */
ZFS_IOC_POOL_DISCARD_CHECKPOINT, /* 0x5a4e */
ZFS_IOC_POOL_INITIALIZE, /* 0x5a4f */
ZFS_IOC_POOL_TRIM, /* 0x5a50 */
ZFS_IOC_REDACT, /* 0x5a51 */
ZFS_IOC_GET_BOOKMARK_PROPS, /* 0x5a52 */
ZFS_IOC_WAIT, /* 0x5a53 */
ZFS_IOC_WAIT_FS, /* 0x5a54 */
ZFS_IOC_VDEV_GET_PROPS, /* 0x5a55 */
ZFS_IOC_VDEV_SET_PROPS, /* 0x5a56 */
/*
* Per-platform (Optional) - 8/128 numbers reserved.
*/
ZFS_IOC_PLATFORM = ZFS_IOC_FIRST + 0x80,
ZFS_IOC_EVENTS_NEXT, /* 0x81 (Linux) */
ZFS_IOC_EVENTS_CLEAR, /* 0x82 (Linux) */
ZFS_IOC_EVENTS_SEEK, /* 0x83 (Linux) */
ZFS_IOC_NEXTBOOT, /* 0x84 (FreeBSD) */
ZFS_IOC_JAIL, /* 0x85 (FreeBSD) */
ZFS_IOC_USERNS_ATTACH = ZFS_IOC_JAIL, /* 0x85 (Linux) */
ZFS_IOC_UNJAIL, /* 0x86 (FreeBSD) */
ZFS_IOC_USERNS_DETACH = ZFS_IOC_UNJAIL, /* 0x86 (Linux) */
ZFS_IOC_SET_BOOTENV, /* 0x87 */
ZFS_IOC_GET_BOOTENV, /* 0x88 */
ZFS_IOC_LAST
} zfs_ioc_t;
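/*
 * Illustrative sketch, not part of the original header: the hexadecimal
 * values in the comments above follow directly from the enum ordering.
 * On Linux the numbers start at ('Z' << 8) == 0x5a00, so for example
 * ZFS_IOC_POOL_IMPORT is 0x5a02; on FreeBSD the same entries simply
 * count up from 0.
 */
_Static_assert(ZFS_IOC_POOL_IMPORT == ZFS_IOC_FIRST + 0x02,
	"core ioctls are numbered consecutively from ZFS_IOC_FIRST");
_Static_assert(ZFS_IOC_PLATFORM == ZFS_IOC_FIRST + 0x80,
	"per-platform ioctls start at offset 0x80");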
/*
* zvol ioctl to get dataset name
*/
#define BLKZNAME _IOR(0x12, 125, char[ZFS_MAX_DATASET_NAME_LEN])
#ifdef __linux__
/*
* IOCTLs to update and retrieve additional file level attributes on
* Linux.
*/
#define ZFS_IOC_GETDOSFLAGS _IOR(0x83, 1, uint64_t)
#define ZFS_IOC_SETDOSFLAGS _IOW(0x83, 2, uint64_t)
/*
* Additional file level attributes, that are stored
* in the upper half of z_pflags
*/
#define ZFS_READONLY 0x0000000100000000ull
#define ZFS_HIDDEN 0x0000000200000000ull
#define ZFS_SYSTEM 0x0000000400000000ull
#define ZFS_ARCHIVE 0x0000000800000000ull
#define ZFS_IMMUTABLE 0x0000001000000000ull
#define ZFS_NOUNLINK 0x0000002000000000ull
#define ZFS_APPENDONLY 0x0000004000000000ull
#define ZFS_NODUMP 0x0000008000000000ull
#define ZFS_OPAQUE 0x0000010000000000ull
#define ZFS_AV_QUARANTINED 0x0000020000000000ull
#define ZFS_AV_MODIFIED 0x0000040000000000ull
#define ZFS_REPARSE 0x0000080000000000ull
#define ZFS_OFFLINE 0x0000100000000000ull
#define ZFS_SPARSE 0x0000200000000000ull
#define ZFS_DOS_FL_USER_VISIBLE (ZFS_IMMUTABLE | ZFS_APPENDONLY | \
ZFS_NOUNLINK | ZFS_ARCHIVE | ZFS_NODUMP | ZFS_SYSTEM | \
ZFS_HIDDEN | ZFS_READONLY | ZFS_REPARSE | ZFS_OFFLINE | \
ZFS_SPARSE)
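/*
 * Illustrative sketch, not part of the original header: these attribute
 * bits occupy the upper half of z_pflags, so a caller holding such a
 * flag word tests individual attributes with plain bit masks. The
 * function name is an assumption for the example.
 */
static inline boolean_t
example_is_immutable(uint64_t pflags)
{
	return ((pflags & ZFS_IMMUTABLE) != 0);
}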
#endif
/*
* ZFS-specific error codes used for returning descriptive errors
* to the userland through zfs ioctls.
*
* The enum implicitly includes all the error codes from errno.h.
* New code should use and extend this enum for errors that are
* not described precisely by generic errno codes.
*
* These numbers should not change over time. New entries should be appended.
*
* (Keep in sync with contrib/pyzfs/libzfs_core/_constants.py)
*/
typedef enum {
ZFS_ERR_CHECKPOINT_EXISTS = 1024,
ZFS_ERR_DISCARDING_CHECKPOINT,
ZFS_ERR_NO_CHECKPOINT,
ZFS_ERR_DEVRM_IN_PROGRESS,
ZFS_ERR_VDEV_TOO_BIG,
ZFS_ERR_IOC_CMD_UNAVAIL,
ZFS_ERR_IOC_ARG_UNAVAIL,
ZFS_ERR_IOC_ARG_REQUIRED,
ZFS_ERR_IOC_ARG_BADTYPE,
ZFS_ERR_WRONG_PARENT,
ZFS_ERR_FROM_IVSET_GUID_MISSING,
ZFS_ERR_FROM_IVSET_GUID_MISMATCH,
ZFS_ERR_SPILL_BLOCK_FLAG_MISSING,
ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE,
ZFS_ERR_EXPORT_IN_PROGRESS,
ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR,
ZFS_ERR_STREAM_TRUNCATED,
ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH,
ZFS_ERR_RESILVER_IN_PROGRESS,
ZFS_ERR_REBUILD_IN_PROGRESS,
ZFS_ERR_BADPROP,
ZFS_ERR_VDEV_NOTSUP,
ZFS_ERR_NOT_USER_NAMESPACE,
} zfs_errno_t;
/*
* Internal SPA load state. Used by FMA diagnosis engine.
*/
typedef enum {
SPA_LOAD_NONE, /* no load in progress */
SPA_LOAD_OPEN, /* normal open */
SPA_LOAD_IMPORT, /* import in progress */
SPA_LOAD_TRYIMPORT, /* tryimport in progress */
SPA_LOAD_RECOVER, /* recovery requested */
SPA_LOAD_ERROR, /* load failed */
SPA_LOAD_CREATE /* creation in progress */
} spa_load_state_t;
typedef enum {
ZPOOL_WAIT_CKPT_DISCARD,
ZPOOL_WAIT_FREE,
ZPOOL_WAIT_INITIALIZE,
ZPOOL_WAIT_REPLACE,
ZPOOL_WAIT_REMOVE,
ZPOOL_WAIT_RESILVER,
ZPOOL_WAIT_SCRUB,
ZPOOL_WAIT_TRIM,
ZPOOL_WAIT_NUM_ACTIVITIES
} zpool_wait_activity_t;
typedef enum {
ZFS_WAIT_DELETEQ,
ZFS_WAIT_NUM_ACTIVITIES
} zfs_wait_activity_t;
/*
* Bookmark name values.
*/
#define ZPOOL_ERR_LIST "error list"
#define ZPOOL_ERR_DATASET "dataset"
#define ZPOOL_ERR_OBJECT "object"
#define HIS_MAX_RECORD_LEN (MAXPATHLEN + MAXPATHLEN + 1)
/*
* The following are names used in the nvlist describing
* the pool's history log.
*/
#define ZPOOL_HIST_RECORD "history record"
#define ZPOOL_HIST_TIME "history time"
#define ZPOOL_HIST_CMD "history command"
#define ZPOOL_HIST_WHO "history who"
#define ZPOOL_HIST_ZONE "history zone"
#define ZPOOL_HIST_HOST "history hostname"
#define ZPOOL_HIST_TXG "history txg"
#define ZPOOL_HIST_INT_EVENT "history internal event"
#define ZPOOL_HIST_INT_STR "history internal str"
#define ZPOOL_HIST_INT_NAME "internal_name"
#define ZPOOL_HIST_IOCTL "ioctl"
#define ZPOOL_HIST_INPUT_NVL "in_nvl"
#define ZPOOL_HIST_OUTPUT_NVL "out_nvl"
#define ZPOOL_HIST_OUTPUT_SIZE "out_size"
#define ZPOOL_HIST_DSNAME "dsname"
#define ZPOOL_HIST_DSID "dsid"
#define ZPOOL_HIST_ERRNO "errno"
#define ZPOOL_HIST_ELAPSED_NS "elapsed_ns"
/*
* Special nvlist name that will not have its args recorded in the pool's
* history log.
*/
#define ZPOOL_HIDDEN_ARGS "hidden_args"
/*
* The following are names used when invoking ZFS_IOC_POOL_INITIALIZE.
*/
#define ZPOOL_INITIALIZE_COMMAND "initialize_command"
#define ZPOOL_INITIALIZE_VDEVS "initialize_vdevs"
/*
* The following are names used when invoking ZFS_IOC_POOL_TRIM.
*/
#define ZPOOL_TRIM_COMMAND "trim_command"
#define ZPOOL_TRIM_VDEVS "trim_vdevs"
#define ZPOOL_TRIM_RATE "trim_rate"
#define ZPOOL_TRIM_SECURE "trim_secure"
/*
* The following are names used when invoking ZFS_IOC_POOL_WAIT.
*/
#define ZPOOL_WAIT_ACTIVITY "wait_activity"
#define ZPOOL_WAIT_TAG "wait_tag"
#define ZPOOL_WAIT_WAITED "wait_waited"
/*
* The following are names used when invoking ZFS_IOC_VDEV_GET_PROP.
*/
#define ZPOOL_VDEV_PROPS_GET_VDEV "vdevprops_get_vdev"
#define ZPOOL_VDEV_PROPS_GET_PROPS "vdevprops_get_props"
/*
* The following are names used when invoking ZFS_IOC_VDEV_SET_PROP.
*/
#define ZPOOL_VDEV_PROPS_SET_VDEV "vdevprops_set_vdev"
#define ZPOOL_VDEV_PROPS_SET_PROPS "vdevprops_set_props"
/*
* The following are names used when invoking ZFS_IOC_WAIT_FS.
*/
#define ZFS_WAIT_ACTIVITY "wait_activity"
#define ZFS_WAIT_WAITED "wait_waited"
/*
* Flags for ZFS_IOC_VDEV_SET_STATE
*/
#define ZFS_ONLINE_CHECKREMOVE 0x1
#define ZFS_ONLINE_UNSPARE 0x2
#define ZFS_ONLINE_FORCEFAULT 0x4
#define ZFS_ONLINE_EXPAND 0x8
#define ZFS_OFFLINE_TEMPORARY 0x1
/*
* Flags for ZFS_IOC_POOL_IMPORT
*/
#define ZFS_IMPORT_NORMAL 0x0
#define ZFS_IMPORT_VERBATIM 0x1
#define ZFS_IMPORT_ANY_HOST 0x2
#define ZFS_IMPORT_MISSING_LOG 0x4
#define ZFS_IMPORT_ONLY 0x8
#define ZFS_IMPORT_TEMP_NAME 0x10
#define ZFS_IMPORT_SKIP_MMP 0x20
#define ZFS_IMPORT_LOAD_KEYS 0x40
#define ZFS_IMPORT_CHECKPOINT 0x80
/*
* Channel program argument/return nvlist keys and defaults.
*/
#define ZCP_ARG_PROGRAM "program"
#define ZCP_ARG_ARGLIST "arg"
#define ZCP_ARG_SYNC "sync"
#define ZCP_ARG_INSTRLIMIT "instrlimit"
#define ZCP_ARG_MEMLIMIT "memlimit"
#define ZCP_ARG_CLIARGV "argv"
#define ZCP_RET_ERROR "error"
#define ZCP_RET_RETURN "return"
#define ZCP_DEFAULT_INSTRLIMIT (10 * 1000 * 1000)
#define ZCP_MAX_INSTRLIMIT (10 * ZCP_DEFAULT_INSTRLIMIT)
#define ZCP_DEFAULT_MEMLIMIT (10 * 1024 * 1024)
#define ZCP_MAX_MEMLIMIT (10 * ZCP_DEFAULT_MEMLIMIT)
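/*
 * Illustrative sketch, not part of the original header: spelled out, a
 * channel program defaults to 10 million Lua instructions and 10 MiB of
 * memory, and the instrlimit/memlimit arguments may raise those by at
 * most a factor of ten (100 million instructions, 100 MiB).
 */
_Static_assert(ZCP_MAX_MEMLIMIT == 100 * 1024 * 1024,
	"ZCP_MAX_MEMLIMIT works out to 100 MiB");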
/*
* Sysevent payload members. ZFS will generate the following sysevents with the
* given payloads:
*
* ESC_ZFS_RESILVER_START
* ESC_ZFS_RESILVER_FINISH
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
* ZFS_EV_RESILVER_TYPE DATA_TYPE_STRING
*
* ESC_ZFS_POOL_DESTROY
* ESC_ZFS_POOL_REGUID
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
*
* ESC_ZFS_VDEV_REMOVE
* ESC_ZFS_VDEV_CLEAR
* ESC_ZFS_VDEV_CHECK
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
* ZFS_EV_VDEV_PATH DATA_TYPE_STRING (optional)
* ZFS_EV_VDEV_GUID DATA_TYPE_UINT64
*
* ESC_ZFS_HISTORY_EVENT
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
* ZFS_EV_HIST_TIME DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_CMD DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_WHO DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_ZONE DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_HOST DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_TXG DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_INT_EVENT DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_INT_STR DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_INT_NAME DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_IOCTL DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_DSNAME DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_DSID DATA_TYPE_UINT64 (optional)
*
* The ZFS_EV_HIST_* members will correspond to the ZPOOL_HIST_* members in the
* history log nvlist. The keynames will be free of any spaces or other
* characters that consumers of the sysevents might not expect.
*/
#define ZFS_EV_POOL_NAME "pool_name"
#define ZFS_EV_POOL_GUID "pool_guid"
#define ZFS_EV_VDEV_PATH "vdev_path"
#define ZFS_EV_VDEV_GUID "vdev_guid"
#define ZFS_EV_HIST_TIME "history_time"
#define ZFS_EV_HIST_CMD "history_command"
#define ZFS_EV_HIST_WHO "history_who"
#define ZFS_EV_HIST_ZONE "history_zone"
#define ZFS_EV_HIST_HOST "history_hostname"
#define ZFS_EV_HIST_TXG "history_txg"
#define ZFS_EV_HIST_INT_EVENT "history_internal_event"
#define ZFS_EV_HIST_INT_STR "history_internal_str"
#define ZFS_EV_HIST_INT_NAME "history_internal_name"
#define ZFS_EV_HIST_IOCTL "history_ioctl"
#define ZFS_EV_HIST_DSNAME "history_dsname"
#define ZFS_EV_HIST_DSID "history_dsid"
#define ZFS_EV_RESILVER_TYPE "resilver_type"
/*
* We currently support block sizes from 512 bytes to 16MB.
* The benefits of larger blocks, and thus larger IO, need to be weighed
* against the cost of COWing a giant block to modify one byte, and the
* large latency of reading or writing a large block.
*
- * Note that although blocks up to 16MB are supported, the recordsize
- * property can not be set larger than zfs_max_recordsize (default 1MB).
- * See the comment near zfs_max_recordsize in dsl_dataset.c for details.
+ * The recordsize property can not be set larger than zfs_max_recordsize
+ * (default 16MB on 64-bit and 1MB on 32-bit). See the comment near
+ * zfs_max_recordsize in dsl_dataset.c for details.
*
* Note that although the LSIZE field of the blkptr_t can store sizes up
* to 32MB, the dnode's dn_datablkszsec can only store sizes up to
* 32MB - 512 bytes. Therefore, we limit SPA_MAXBLOCKSIZE to 16MB.
*/
#define SPA_MINBLOCKSHIFT 9
#define SPA_OLD_MAXBLOCKSHIFT 17
#define SPA_MAXBLOCKSHIFT 24
#define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT)
#define SPA_OLD_MAXBLOCKSIZE (1ULL << SPA_OLD_MAXBLOCKSHIFT)
#define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT)
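/*
 * Illustrative sketch, not part of the original header: the resulting
 * sizes, spelled out.
 *
 *	SPA_MINBLOCKSIZE	= 1 << 9	= 512 bytes
 *	SPA_OLD_MAXBLOCKSIZE	= 1 << 17	= 128 KiB
 *	SPA_MAXBLOCKSIZE	= 1 << 24	= 16 MiB
 */
_Static_assert(SPA_MAXBLOCKSIZE == (16ULL << 20),
	"SPA_MAXBLOCKSIZE is 16 MiB");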
/* supported encryption algorithms */
enum zio_encrypt {
ZIO_CRYPT_INHERIT = 0,
ZIO_CRYPT_ON,
ZIO_CRYPT_OFF,
ZIO_CRYPT_AES_128_CCM,
ZIO_CRYPT_AES_192_CCM,
ZIO_CRYPT_AES_256_CCM,
ZIO_CRYPT_AES_128_GCM,
ZIO_CRYPT_AES_192_GCM,
ZIO_CRYPT_AES_256_GCM,
ZIO_CRYPT_FUNCTIONS
};
#define ZIO_CRYPT_ON_VALUE ZIO_CRYPT_AES_256_GCM
#define ZIO_CRYPT_DEFAULT ZIO_CRYPT_OFF
/*
* xattr namespace prefixes. These are forbidden in xattr names.
*
* For cross-platform compatibility, xattrs in the user namespace should not be
* prefixed with the namespace name, but for backwards compatibility with older
* ZFS on Linux versions we do prefix the namespace.
*/
#define ZFS_XA_NS_FREEBSD_PREFIX "freebsd:"
#define ZFS_XA_NS_FREEBSD_PREFIX_LEN strlen("freebsd:")
#define ZFS_XA_NS_LINUX_SECURITY_PREFIX "security."
#define ZFS_XA_NS_LINUX_SECURITY_PREFIX_LEN strlen("security.")
#define ZFS_XA_NS_LINUX_SYSTEM_PREFIX "system."
#define ZFS_XA_NS_LINUX_SYSTEM_PREFIX_LEN strlen("system.")
#define ZFS_XA_NS_LINUX_TRUSTED_PREFIX "trusted."
#define ZFS_XA_NS_LINUX_TRUSTED_PREFIX_LEN strlen("trusted.")
#define ZFS_XA_NS_LINUX_USER_PREFIX "user."
#define ZFS_XA_NS_LINUX_USER_PREFIX_LEN strlen("user.")
#define ZFS_XA_NS_PREFIX_MATCH(ns, name) \
(strncmp(name, ZFS_XA_NS_##ns##_PREFIX, \
ZFS_XA_NS_##ns##_PREFIX_LEN) == 0)
#define ZFS_XA_NS_PREFIX_FORBIDDEN(name) \
(ZFS_XA_NS_PREFIX_MATCH(FREEBSD, name) || \
ZFS_XA_NS_PREFIX_MATCH(LINUX_SECURITY, name) || \
ZFS_XA_NS_PREFIX_MATCH(LINUX_SYSTEM, name) || \
ZFS_XA_NS_PREFIX_MATCH(LINUX_TRUSTED, name) || \
ZFS_XA_NS_PREFIX_MATCH(LINUX_USER, name))
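/*
 * Illustrative sketch, not part of the original header: how the macros
 * above classify a few candidate xattr names. "trusted.foo" and
 * "freebsd:foo" are rejected because they begin with a reserved
 * namespace prefix, while "myapp.foo" is accepted. The function name is
 * an assumption for the example.
 */
static inline boolean_t
example_xattr_name_allowed(const char *name)
{
	return (!ZFS_XA_NS_PREFIX_FORBIDDEN(name));
}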
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_H */
diff --git a/sys/contrib/openzfs/include/sys/spa_boot.h b/sys/contrib/openzfs/include/sys/spa_boot.h
deleted file mode 100644
index 4a69efdda945..000000000000
--- a/sys/contrib/openzfs/include/sys/spa_boot.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or https://opensource.org/licenses/CDDL-1.0.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_SPA_BOOT_H
-#define _SYS_SPA_BOOT_H
-
-#include <sys/nvpair.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern char *spa_get_bootprop(char *prop);
-extern void spa_free_bootprop(char *prop);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_SPA_BOOT_H */
diff --git a/sys/contrib/openzfs/include/sys/sysevent/dev.h b/sys/contrib/openzfs/include/sys/sysevent/dev.h
index da6539b4a0d5..0783d0073162 100644
--- a/sys/contrib/openzfs/include/sys/sysevent/dev.h
+++ b/sys/contrib/openzfs/include/sys/sysevent/dev.h
@@ -1,261 +1,264 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SYSEVENT_DEV_H
#define _SYS_SYSEVENT_DEV_H
#include <sys/sysevent/eventdefs.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Event schema for EC_DEV_ADD/ESC_DISK
*
* Event Class - EC_DEV_ADD
* Event Sub-Class - ESC_DISK
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - /dev name to the raw device.
* The name does not include the slice number component.
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path of the device without the "/devices"
* prefix.
*
* Attribute Name - DEV_DRIVER_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - driver name
*
* Attribute Name - DEV_INSTANCE
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - driver instance number
*
* Attribute Name - DEV_PROP_PREFIX<devinfo_node_property>
* Attribute Type - data type of the devinfo_node_property
* Attribute Value - value of the devinfo_node_property
*
*
* Event schema for EC_DEV_ADD/ESC_NETWORK
*
* Event Class - EC_DEV_ADD
* Event Sub-Class - ESC_NETWORK
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - /dev name associated with the device if it exists.
* /dev name associated with the driver for DLPI
* Style-2 only drivers.
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path of the device without the "/devices"
* prefix.
*
* Attribute Name - DEV_DRIVER_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - driver name
*
* Attribute Name - DEV_INSTANCE
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - driver instance number
*
* Attribute Name - DEV_PROP_PREFIX<devinfo_node_property>
* Attribute Type - data type of the devinfo_node_property
* Attribute Value - value of the devinfo_node_property
*
*
* Event schema for EC_DEV_ADD/ESC_PRINTER
*
* Event Class - EC_DEV_ADD
* Event Sub-Class - ESC_PRINTER
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - /dev/printers name associated with the device
* if it exists.
* /dev name associated with the device if it exists
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path of the device without the "/devices"
* prefix.
*
* Attribute Name - DEV_DRIVER_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - driver name
*
* Attribute Name - DEV_INSTANCE
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - driver instance number
*
* Attribute Name - DEV_PROP_PREFIX<devinfo_node_property>
* Attribute Type - data type of the devinfo_node_property
* Attribute Value - value of the devinfo_node_property
*
*
* Event schema for EC_DEV_REMOVE/ESC_DISK
*
* Event Class - EC_DEV_REMOVE
* Event Sub-Class - ESC_DISK
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - /dev name to the raw device.
* The name does not include the slice number component.
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path of the device without the "/devices"
* prefix.
*
* Attribute Name - DEV_DRIVER_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - driver name
*
* Attribute Name - DEV_INSTANCE
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - driver instance number
*
*
* Event schema for EC_DEV_REMOVE/ESC_NETWORK
*
* Event Class - EC_DEV_REMOVE
* Event Sub-Class - ESC_NETWORK
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - /dev name associated with the device if it exists.
* /dev name associated with the driver for DLPI
* Style-2 only drivers.
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path of the device without the "/devices"
* prefix.
*
* Attribute Name - DEV_DRIVER_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - driver name
*
* Attribute Name - DEV_INSTANCE
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - driver instance number
*
*
* Event schema for EC_DEV_REMOVE/ESC_PRINTER
*
* Event Class - EC_DEV_REMOVE
* Event Sub-Class - ESC_PRINTER
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - /dev/printers name associated with the device
* if it exists.
* /dev name associated with the device if it exists
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path of the device without the "/devices"
* prefix.
*
* Attribute Name - DEV_DRIVER_NAME
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - driver name
*
* Attribute Name - DEV_INSTANCE
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - driver instance number
*
*
* Event schema for EC_DEV_BRANCH/ESC_DEV_BRANCH_ADD or ESC_DEV_BRANCH_REMOVE
*
* Event Class - EC_DEV_BRANCH
* Event Sub-Class - ESC_DEV_BRANCH_ADD or ESC_DEV_BRANCH_REMOVE
*
* Attribute Name - EV_VERSION
* Attribute Type - DATA_TYPE_INT32
* Attribute Value - event version number
*
* Attribute Name - DEV_PHYS_PATH
* Attribute Type - DATA_TYPE_STRING
* Attribute Value - physical path to the root node of the device subtree
* without the "/devices" prefix.
*/
#define EV_VERSION "version"
#define DEV_PHYS_PATH "phys_path"
#define DEV_NAME "dev_name"
#define DEV_DRIVER_NAME "driver_name"
#define DEV_INSTANCE "instance"
#define DEV_PROP_PREFIX "prop-"
#ifdef __linux__
#define DEV_IDENTIFIER "devid"
#define DEV_PATH "path"
#define DEV_IS_PART "is_slice"
#define DEV_SIZE "dev_size"
+
+/* Size of the whole parent block device (if dev is a partition) */
+#define DEV_PARENT_SIZE "dev_parent_size"
#endif /* __linux__ */
#define EV_V1 1
/* maximum number of devinfo node properties added to the event */
#define MAX_PROP_COUNT 100
/* only properties with size less than PROP_LEN_LIMIT are added to the event */
#define PROP_LEN_LIMIT 1024
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SYSEVENT_DEV_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_impl.h b/sys/contrib/openzfs/include/sys/vdev_impl.h
index d22abfbc2598..470eaa763d5a 100644
--- a/sys/contrib/openzfs/include/sys/vdev_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_impl.h
@@ -1,657 +1,658 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#ifndef _SYS_VDEV_IMPL_H
#define _SYS_VDEV_IMPL_H
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/metaslab.h>
#include <sys/nvpair.h>
#include <sys/space_map.h>
#include <sys/vdev.h>
#include <sys/dkio.h>
#include <sys/uberblock_impl.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/zfs_ratelimit.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Virtual device descriptors.
*
* All storage pool operations go through the virtual device framework,
* which provides data replication and I/O scheduling.
*/
/*
* Forward declarations that lots of things need.
*/
typedef struct vdev_queue vdev_queue_t;
typedef struct vdev_cache vdev_cache_t;
typedef struct vdev_cache_entry vdev_cache_entry_t;
struct abd;
extern int zfs_vdev_queue_depth_pct;
extern int zfs_vdev_def_queue_depth;
extern uint32_t zfs_vdev_async_write_max_active;
/*
* Virtual device operations
*/
typedef int vdev_init_func_t(spa_t *spa, nvlist_t *nv, void **tsd);
typedef void vdev_fini_func_t(vdev_t *vd);
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
uint64_t *ashift, uint64_t *pshift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
typedef uint64_t vdev_min_asize_func_t(vdev_t *vd);
typedef uint64_t vdev_min_alloc_func_t(vdev_t *vd);
typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
typedef void vdev_hold_func_t(vdev_t *vd);
typedef void vdev_rele_func_t(vdev_t *vd);
typedef void vdev_remap_cb_t(uint64_t inner_offset, vdev_t *vd,
uint64_t offset, uint64_t size, void *arg);
typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
vdev_remap_cb_t callback, void *arg);
/*
* Given a target vdev, translates the logical range "in" to the physical
* range "res"
*/
typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *logical,
range_seg64_t *physical, range_seg64_t *remain);
typedef uint64_t vdev_rebuild_asize_func_t(vdev_t *vd, uint64_t start,
uint64_t size, uint64_t max_segment);
typedef void vdev_metaslab_init_func_t(vdev_t *vd, uint64_t *startp,
uint64_t *sizep);
typedef void vdev_config_generate_func_t(vdev_t *vd, nvlist_t *nv);
typedef uint64_t vdev_nparity_func_t(vdev_t *vd);
typedef uint64_t vdev_ndisks_func_t(vdev_t *vd);
typedef const struct vdev_ops {
vdev_init_func_t *vdev_op_init;
vdev_fini_func_t *vdev_op_fini;
vdev_open_func_t *vdev_op_open;
vdev_close_func_t *vdev_op_close;
vdev_asize_func_t *vdev_op_asize;
vdev_min_asize_func_t *vdev_op_min_asize;
vdev_min_alloc_func_t *vdev_op_min_alloc;
vdev_io_start_func_t *vdev_op_io_start;
vdev_io_done_func_t *vdev_op_io_done;
vdev_state_change_func_t *vdev_op_state_change;
vdev_need_resilver_func_t *vdev_op_need_resilver;
vdev_hold_func_t *vdev_op_hold;
vdev_rele_func_t *vdev_op_rele;
vdev_remap_func_t *vdev_op_remap;
vdev_xlation_func_t *vdev_op_xlate;
vdev_rebuild_asize_func_t *vdev_op_rebuild_asize;
vdev_metaslab_init_func_t *vdev_op_metaslab_init;
vdev_config_generate_func_t *vdev_op_config_generate;
vdev_nparity_func_t *vdev_op_nparity;
vdev_ndisks_func_t *vdev_op_ndisks;
char vdev_op_type[16];
boolean_t vdev_op_leaf;
} vdev_ops_t;
/*
* Virtual device properties
*/
struct vdev_cache_entry {
struct abd *ve_abd;
uint64_t ve_offset;
clock_t ve_lastused;
avl_node_t ve_offset_node;
avl_node_t ve_lastused_node;
uint32_t ve_hits;
uint16_t ve_missed_update;
zio_t *ve_fill_io;
};
struct vdev_cache {
avl_tree_t vc_offset_tree;
avl_tree_t vc_lastused_tree;
kmutex_t vc_lock;
};
typedef struct vdev_queue_class {
uint32_t vqc_active;
/*
* Sorted by offset or timestamp, depending on whether the queue is
* LBA-ordered or FIFO.
*/
avl_tree_t vqc_queued_tree;
} vdev_queue_class_t;
struct vdev_queue {
vdev_t *vq_vdev;
vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
avl_tree_t vq_active_tree;
avl_tree_t vq_read_offset_tree;
avl_tree_t vq_write_offset_tree;
avl_tree_t vq_trim_offset_tree;
uint64_t vq_last_offset;
zio_priority_t vq_last_prio; /* Last sent I/O priority. */
uint32_t vq_ia_active; /* Active interactive I/Os. */
uint32_t vq_nia_credit; /* Non-interactive I/Os credit. */
hrtime_t vq_io_complete_ts; /* time last i/o completed */
hrtime_t vq_io_delta_ts;
zio_t vq_io_search; /* used as local for stack reduction */
kmutex_t vq_lock;
};
typedef enum vdev_alloc_bias {
VDEV_BIAS_NONE,
VDEV_BIAS_LOG, /* dedicated to ZIL data (SLOG) */
VDEV_BIAS_SPECIAL, /* dedicated to ddt, metadata, and small blks */
VDEV_BIAS_DEDUP /* dedicated to dedup metadata */
} vdev_alloc_bias_t;
/*
* On-disk indirect vdev state.
*
* An indirect vdev is described exclusively in the MOS config of a pool.
* The config for an indirect vdev includes several fields, which are
* accessed in memory by a vdev_indirect_config_t.
*/
typedef struct vdev_indirect_config {
/*
* Object (in MOS) which contains the indirect mapping. This object
* contains an array of vdev_indirect_mapping_entry_phys_t ordered by
* vimep_src. The bonus buffer for this object is a
* vdev_indirect_mapping_phys_t. This object is allocated when a vdev
* removal is initiated.
*
* Note that this object can be empty if none of the data on the vdev
* has been copied yet.
*/
uint64_t vic_mapping_object;
/*
* Object (in MOS) which contains the birth times for the mapping
* entries. This object contains an array of
* vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
* buffer for this object is a vdev_indirect_birth_phys_t. This object
* is allocated when a vdev removal is initiated.
*
* Note that this object can be empty if none of the data on the vdev
* has yet been copied.
*/
uint64_t vic_births_object;
/*
* This is the vdev ID which was removed previous to this vdev, or
* UINT64_MAX if there are no previously removed vdevs.
*/
uint64_t vic_prev_indirect_vdev;
} vdev_indirect_config_t;
/*
* Virtual device descriptor
*/
struct vdev {
/*
* Common to all vdev types.
*/
uint64_t vdev_id; /* child number in vdev parent */
uint64_t vdev_guid; /* unique ID for this vdev */
uint64_t vdev_guid_sum; /* self guid + all child guids */
uint64_t vdev_orig_guid; /* orig. guid prior to remove */
uint64_t vdev_asize; /* allocatable device capacity */
uint64_t vdev_min_asize; /* min acceptable asize */
uint64_t vdev_max_asize; /* max acceptable asize */
uint64_t vdev_ashift; /* block alignment shift */
/*
* Logical block alignment shift
*
* The smallest sized/aligned I/O supported by the device.
*/
uint64_t vdev_logical_ashift;
/*
* Physical block alignment shift
*
* The device supports logical I/Os with vdev_logical_ashift
* size/alignment, but optimum performance will be achieved by
* aligning/sizing requests to vdev_physical_ashift. Smaller
* requests may be inflated or incur device level read-modify-write
* operations.
*
* May be 0 to indicate no preference (i.e. use vdev_logical_ashift).
*/
uint64_t vdev_physical_ashift;
uint64_t vdev_state; /* see VDEV_STATE_* #defines */
uint64_t vdev_prevstate; /* used when reopening a vdev */
vdev_ops_t *vdev_ops; /* vdev operations */
spa_t *vdev_spa; /* spa for this vdev */
void *vdev_tsd; /* type-specific data */
vdev_t *vdev_top; /* top-level vdev */
vdev_t *vdev_parent; /* parent vdev */
vdev_t **vdev_child; /* array of children */
uint64_t vdev_children; /* number of children */
vdev_stat_t vdev_stat; /* virtual device statistics */
vdev_stat_ex_t vdev_stat_ex; /* extended statistics */
boolean_t vdev_expanding; /* expand the vdev? */
boolean_t vdev_reopening; /* reopen in progress? */
boolean_t vdev_nonrot; /* true if solid state */
int vdev_load_error; /* error on last load */
int vdev_open_error; /* error on last open */
int vdev_validate_error; /* error on last validate */
kthread_t *vdev_open_thread; /* thread opening children */
kthread_t *vdev_validate_thread; /* thread validating children */
uint64_t vdev_crtxg; /* txg when top-level was added */
/*
* Top-level vdev state.
*/
uint64_t vdev_ms_array; /* metaslab array object */
uint64_t vdev_ms_shift; /* metaslab size shift */
uint64_t vdev_ms_count; /* number of metaslabs */
metaslab_group_t *vdev_mg; /* metaslab group */
metaslab_group_t *vdev_log_mg; /* embedded slog metaslab group */
metaslab_t **vdev_ms; /* metaslab array */
uint64_t vdev_pending_fastwrite; /* allocated fastwrites */
txg_list_t vdev_ms_list; /* per-txg dirty metaslab lists */
txg_list_t vdev_dtl_list; /* per-txg dirty DTL lists */
txg_node_t vdev_txg_node; /* per-txg dirty vdev linkage */
boolean_t vdev_remove_wanted; /* async remove wanted? */
boolean_t vdev_probe_wanted; /* async probe wanted? */
list_node_t vdev_config_dirty_node; /* config dirty list */
list_node_t vdev_state_dirty_node; /* state dirty list */
uint64_t vdev_deflate_ratio; /* deflation ratio (x512) */
uint64_t vdev_islog; /* is an intent log device */
uint64_t vdev_noalloc; /* device is passivated? */
uint64_t vdev_removing; /* device is being removed? */
boolean_t vdev_ishole; /* is a hole in the namespace */
uint64_t vdev_top_zap;
vdev_alloc_bias_t vdev_alloc_bias; /* metaslab allocation bias */
/* pool checkpoint related */
space_map_t *vdev_checkpoint_sm; /* contains reserved blocks */
/* Initialize related */
boolean_t vdev_initialize_exit_wanted;
vdev_initializing_state_t vdev_initialize_state;
list_node_t vdev_initialize_node;
kthread_t *vdev_initialize_thread;
/* Protects vdev_initialize_thread and vdev_initialize_state. */
kmutex_t vdev_initialize_lock;
kcondvar_t vdev_initialize_cv;
uint64_t vdev_initialize_offset[TXG_SIZE];
uint64_t vdev_initialize_last_offset;
range_tree_t *vdev_initialize_tree; /* valid while initializing */
uint64_t vdev_initialize_bytes_est;
uint64_t vdev_initialize_bytes_done;
uint64_t vdev_initialize_action_time; /* start and end time */
/* TRIM related */
boolean_t vdev_trim_exit_wanted;
boolean_t vdev_autotrim_exit_wanted;
vdev_trim_state_t vdev_trim_state;
list_node_t vdev_trim_node;
kmutex_t vdev_autotrim_lock;
kcondvar_t vdev_autotrim_cv;
kthread_t *vdev_autotrim_thread;
/* Protects vdev_trim_thread and vdev_trim_state. */
kmutex_t vdev_trim_lock;
kcondvar_t vdev_trim_cv;
kthread_t *vdev_trim_thread;
uint64_t vdev_trim_offset[TXG_SIZE];
uint64_t vdev_trim_last_offset;
uint64_t vdev_trim_bytes_est;
uint64_t vdev_trim_bytes_done;
uint64_t vdev_trim_rate; /* requested rate (bytes/sec) */
uint64_t vdev_trim_partial; /* requested partial TRIM */
uint64_t vdev_trim_secure; /* requested secure TRIM */
uint64_t vdev_trim_action_time; /* start and end time */
/* Rebuild related */
boolean_t vdev_rebuilding;
boolean_t vdev_rebuild_exit_wanted;
boolean_t vdev_rebuild_cancel_wanted;
boolean_t vdev_rebuild_reset_wanted;
kmutex_t vdev_rebuild_lock;
kcondvar_t vdev_rebuild_cv;
kthread_t *vdev_rebuild_thread;
vdev_rebuild_t vdev_rebuild_config;
/* For limiting outstanding I/Os (initialize, TRIM) */
kmutex_t vdev_initialize_io_lock;
kcondvar_t vdev_initialize_io_cv;
uint64_t vdev_initialize_inflight;
kmutex_t vdev_trim_io_lock;
kcondvar_t vdev_trim_io_cv;
uint64_t vdev_trim_inflight[3];
/*
* Values stored in the config for an indirect or removing vdev.
*/
vdev_indirect_config_t vdev_indirect_config;
/*
* The vdev_indirect_rwlock protects the vdev_indirect_mapping
* pointer from changing on indirect vdevs (when it is condensed).
* Note that removing (not yet indirect) vdevs have different
* access patterns (the mapping is not accessed from open context,
* e.g. from zio_read) and locking strategy (e.g. svr_lock).
*/
krwlock_t vdev_indirect_rwlock;
vdev_indirect_mapping_t *vdev_indirect_mapping;
vdev_indirect_births_t *vdev_indirect_births;
/*
* In memory data structures used to manage the obsolete sm, for
* indirect or removing vdevs.
*
* The vdev_obsolete_segments is the in-core record of the segments
* that are no longer referenced anywhere in the pool (due to
* being freed or remapped and not referenced by any snapshots).
* During a sync, segments are added to vdev_obsolete_segments
* via vdev_indirect_mark_obsolete(); at the end of each sync
* pass, this is appended to vdev_obsolete_sm via
* vdev_indirect_sync_obsolete(). The vdev_obsolete_lock
* protects against concurrent modifications of vdev_obsolete_segments
* from multiple zio threads.
*/
kmutex_t vdev_obsolete_lock;
range_tree_t *vdev_obsolete_segments;
space_map_t *vdev_obsolete_sm;
/*
* Protects the vdev_scan_io_queue field itself as well as the
* structure's contents (when present).
*/
kmutex_t vdev_scan_io_queue_lock;
struct dsl_scan_io_queue *vdev_scan_io_queue;
/*
* Leaf vdev state.
*/
range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
space_map_t *vdev_dtl_sm; /* dirty time log space map */
txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */
uint64_t vdev_dtl_object; /* DTL object */
uint64_t vdev_psize; /* physical device capacity */
uint64_t vdev_wholedisk; /* true if this is a whole disk */
uint64_t vdev_offline; /* persistent offline state */
uint64_t vdev_faulted; /* persistent faulted state */
uint64_t vdev_degraded; /* persistent degraded state */
uint64_t vdev_removed; /* persistent removed state */
uint64_t vdev_resilver_txg; /* persistent resilvering state */
uint64_t vdev_rebuild_txg; /* persistent rebuilding state */
char *vdev_path; /* vdev path (if any) */
char *vdev_devid; /* vdev devid (if any) */
char *vdev_physpath; /* vdev device path (if any) */
char *vdev_enc_sysfs_path; /* enclosure sysfs path */
char *vdev_fru; /* physical FRU location */
uint64_t vdev_not_present; /* not present during import */
uint64_t vdev_unspare; /* unspare when resilvering done */
boolean_t vdev_nowritecache; /* true if flushwritecache failed */
boolean_t vdev_has_trim; /* TRIM is supported */
boolean_t vdev_has_securetrim; /* secure TRIM is supported */
boolean_t vdev_checkremove; /* temporary online test */
boolean_t vdev_forcefault; /* force online fault */
boolean_t vdev_splitting; /* split or repair in progress */
boolean_t vdev_delayed_close; /* delayed device close? */
boolean_t vdev_tmpoffline; /* device taken offline temporarily? */
boolean_t vdev_detached; /* device detached? */
boolean_t vdev_cant_read; /* vdev is failing all reads */
boolean_t vdev_cant_write; /* vdev is failing all writes */
boolean_t vdev_isspare; /* was a hot spare */
boolean_t vdev_isl2cache; /* was a l2cache device */
boolean_t vdev_copy_uberblocks; /* post expand copy uberblocks */
boolean_t vdev_resilver_deferred; /* resilver deferred */
vdev_queue_t vdev_queue; /* I/O deadline schedule queue */
vdev_cache_t vdev_cache; /* physical block cache */
spa_aux_vdev_t *vdev_aux; /* for l2cache and spares vdevs */
zio_t *vdev_probe_zio; /* root of current probe */
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
hrtime_t vdev_mmp_pending; /* 0 if write finished */
uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
uint64_t vdev_expansion_time; /* vdev's last expansion time */
list_node_t vdev_leaf_node; /* leaf vdev list */
/*
* For DTrace to work in userland (libzpool) context, these fields must
* remain at the end of the structure. DTrace will use the kernel's
* CTF definition for 'struct vdev', and since the size of a kmutex_t is
* larger in userland, the offsets for the rest of the fields would be
* incorrect.
*/
kmutex_t vdev_dtl_lock; /* vdev_dtl_{map,resilver} */
kmutex_t vdev_stat_lock; /* vdev_stat */
kmutex_t vdev_probe_lock; /* protects vdev_probe_zio */
/*
* We rate limit ZIO delay, deadman, and checksum events, since they
* can flood ZED with tons of events when a drive is acting up.
*/
zfs_ratelimit_t vdev_delay_rl;
zfs_ratelimit_t vdev_deadman_rl;
zfs_ratelimit_t vdev_checksum_rl;
};
#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_be) to skip */
#define VDEV_SKIP_SIZE VDEV_PAD_SIZE * 2
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)
/*
* MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
* ring when MMP is enabled.
*/
#define MMP_BLOCKS_PER_LABEL 1
/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd) \
MIN(MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT), \
MAX_UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd) \
(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n) \
offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
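/*
 * Illustrative worked example (a sketch, not normative): assuming
 * UBERBLOCK_SHIFT is 10 (1 KiB minimum slot, from uberblock_impl.h), a
 * top-level vdev with ashift 12 gets VDEV_UBERBLOCK_SHIFT = 12, so each
 * slot is 4 KiB and the 128 KiB ring holds VDEV_UBERBLOCK_COUNT = 32
 * uberblocks; with ashift 14 the shift is clamped to MAX_UBERBLOCK_SHIFT
 * = 13, giving 8 KiB slots and 16 of them per label.
 */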
typedef struct vdev_phys {
char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
zio_eck_t vp_zbt;
} vdev_phys_t;
typedef enum vbe_vers {
/*
* The bootenv file is stored as ASCII text in the envblock.
* It is used by the GRUB bootloader on Linux to store the
* contents of the grubenv file. The file is stored as raw ASCII,
* and is protected by an embedded checksum. By default, GRUB will
* check if the boot filesystem supports storing the environment data
* in a special location, and if so, will invoke filesystem specific
* logic to retrieve it. This can be overridden by a variable, should
* the user so desire.
*/
VB_RAW = 0,
/*
* The bootenv file is converted to an nvlist and then packed into the
* envblock.
*/
VB_NVLIST = 1
} vbe_vers_t;
typedef struct vdev_boot_envblock {
uint64_t vbe_version;
char vbe_bootenv[VDEV_PAD_SIZE - sizeof (uint64_t) -
sizeof (zio_eck_t)];
zio_eck_t vbe_zbt;
} vdev_boot_envblock_t;
_Static_assert(sizeof (vdev_boot_envblock_t) == VDEV_PAD_SIZE,
"vdev_boot_envblock_t wrong size");
typedef struct vdev_label {
char vl_pad1[VDEV_PAD_SIZE]; /* 8K */
vdev_boot_envblock_t vl_be; /* 8K */
vdev_phys_t vl_vdev_phys; /* 112K */
char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
} vdev_label_t; /* 256K total */
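/*
 * Illustrative check (a sketch, not part of the on-disk format definition):
 * the layout above should add up to the advertised 256K, assuming the
 * compiler inserts no padding between the members.
 */
_Static_assert(sizeof (vdev_label_t) ==
    2 * VDEV_PAD_SIZE + VDEV_PHYS_SIZE + VDEV_UBERBLOCK_RING,
    "vdev_label_t layout: 8K + 8K + 112K + 128K == 256K");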
/*
* vdev_dirty() flags
*/
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02
/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
/*
* Size of embedded boot loader region on each label.
* The total size of the first two labels plus the boot area is 4MB.
*/
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
/*
* Size of label regions at the start and end of each leaf device.
*/
#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
#define VDEV_LABELS 4
#define VDEV_BEST_LABEL VDEV_LABELS
#define VDEV_OFFSET_IS_LABEL(vd, off) \
(((off) < VDEV_LABEL_START_SIZE) || \
((off) >= ((vd)->vdev_psize - VDEV_LABEL_END_SIZE)))
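/*
 * Illustrative arithmetic (a sketch, not normative): the front of each leaf
 * device holds label 0 (256K) + label 1 (256K) + the embedded boot region
 * (3.5M), so VDEV_LABEL_START_SIZE = 512K + 3584K = 4M, matching the "first
 * two labels plus the boot area" total noted above.  Labels 2 and 3 sit at
 * the end of the device, so VDEV_LABEL_END_SIZE = 512K; VDEV_OFFSET_IS_LABEL
 * simply tests whether an offset falls in either reserved region.
 */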
#define VDEV_ALLOC_LOAD 0
#define VDEV_ALLOC_ADD 1
#define VDEV_ALLOC_SPARE 2
#define VDEV_ALLOC_L2CACHE 3
#define VDEV_ALLOC_ROOTPOOL 4
#define VDEV_ALLOC_SPLIT 5
#define VDEV_ALLOC_ATTACH 6
/*
* Allocate or free a vdev
*/
extern vdev_t *vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid,
vdev_ops_t *ops);
extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config,
vdev_t *parent, uint_t id, int alloctype);
extern void vdev_free(vdev_t *vd);
/*
* Add or remove children and parents
*/
extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_compact_children(vdev_t *pvd);
extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops);
extern void vdev_remove_parent(vdev_t *cvd);
/*
* vdev sync load and sync
*/
extern boolean_t vdev_log_state_valid(vdev_t *vd);
extern int vdev_load(vdev_t *vd);
extern int vdev_dtl_load(vdev_t *vd);
extern void vdev_sync(vdev_t *vd, uint64_t txg);
extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);
/*
* Available vdev types.
*/
extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
extern vdev_ops_t vdev_draid_ops;
extern vdev_ops_t vdev_draid_spare_ops;
extern vdev_ops_t vdev_disk_ops;
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
extern vdev_ops_t vdev_hole_ops;
extern vdev_ops_t vdev_spare_ops;
extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
extern uint64_t vdev_default_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_alloc(vdev_t *vd);
extern uint64_t vdev_get_nparity(vdev_t *vd);
extern uint64_t vdev_get_ndisks(vdev_t *vd);
/*
* Global variables
*/
extern int zfs_vdev_standard_sm_blksz;
/*
* Functions from vdev_indirect.c
*/
extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
extern int vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj);
extern int vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise);
/*
* Other miscellaneous functions
*/
int vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj);
void vdev_metaslab_group_create(vdev_t *vd);
+uint64_t vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b);
/*
* Vdev ashift optimization tunables
*/
extern uint64_t zfs_vdev_min_auto_ashift;
extern uint64_t zfs_vdev_max_auto_ashift;
int param_set_min_auto_ashift(ZFS_MODULE_PARAM_ARGS);
int param_set_max_auto_ashift(ZFS_MODULE_PARAM_ARGS);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h b/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
index 12f5eff22c62..c1037fa12e30 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
@@ -1,389 +1,389 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#ifndef _VDEV_RAIDZ_H
#define _VDEV_RAIDZ_H
#include <sys/types.h>
#include <sys/debug.h>
#include <sys/kstat.h>
#include <sys/abd.h>
#include <sys/vdev_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
#define CODE_P (0U)
#define CODE_Q (1U)
#define CODE_R (2U)
#define PARITY_P (1U)
#define PARITY_PQ (2U)
#define PARITY_PQR (3U)
#define TARGET_X (0U)
#define TARGET_Y (1U)
#define TARGET_Z (2U)
/*
* Parity generation methods indexes
*/
enum raidz_math_gen_op {
RAIDZ_GEN_P = 0,
RAIDZ_GEN_PQ,
RAIDZ_GEN_PQR,
RAIDZ_GEN_NUM = 3
};
/*
* Data reconstruction methods indexes
*/
enum raidz_rec_op {
RAIDZ_REC_P = 0,
RAIDZ_REC_Q,
RAIDZ_REC_R,
RAIDZ_REC_PQ,
RAIDZ_REC_PR,
RAIDZ_REC_QR,
RAIDZ_REC_PQR,
RAIDZ_REC_NUM = 7
};
extern const char *const raidz_gen_name[RAIDZ_GEN_NUM];
extern const char *const raidz_rec_name[RAIDZ_REC_NUM];
/*
* Methods used to define raidz implementation
*
* @raidz_gen_f Parity generation function
* @par1 pointer to raidz_map
* @raidz_rec_f Data reconstruction function
* @par1 pointer to raidz_map
* @par2 array of reconstruction targets
* @will_work_f Function returns TRUE if impl. is supported on the system
* @init_impl_f Function is called once on init
* @fini_impl_f Function is called once on fini
*/
typedef void (*raidz_gen_f)(void *);
typedef int (*raidz_rec_f)(void *, const int *);
typedef boolean_t (*will_work_f)(void);
typedef void (*init_impl_f)(void);
typedef void (*fini_impl_f)(void);
#define RAIDZ_IMPL_NAME_MAX (20)
typedef struct raidz_impl_ops {
init_impl_f init;
fini_impl_f fini;
raidz_gen_f gen[RAIDZ_GEN_NUM]; /* Parity generate functions */
raidz_rec_f rec[RAIDZ_REC_NUM]; /* Data reconstruction functions */
will_work_f is_supported; /* Support check function */
char name[RAIDZ_IMPL_NAME_MAX]; /* Name of the implementation */
} raidz_impl_ops_t;
typedef struct raidz_col {
uint64_t rc_devidx; /* child device index for I/O */
uint64_t rc_offset; /* device offset */
uint64_t rc_size; /* I/O size */
abd_t rc_abdstruct; /* rc_abd probably points here */
abd_t *rc_abd; /* I/O data */
abd_t *rc_orig_data; /* pre-reconstruction */
int rc_error; /* I/O error for this device */
uint8_t rc_tried; /* Did we attempt this I/O column? */
uint8_t rc_skipped; /* Did we skip this I/O column? */
uint8_t rc_need_orig_restore; /* need to restore from orig_data? */
uint8_t rc_force_repair; /* Write good data to this column */
uint8_t rc_allow_repair; /* Allow repair I/O to this column */
} raidz_col_t;
typedef struct raidz_row {
uint64_t rr_cols; /* Regular column count */
uint64_t rr_scols; /* Count including skipped columns */
uint64_t rr_bigcols; /* Remainder data column count */
uint64_t rr_missingdata; /* Count of missing data devices */
uint64_t rr_missingparity; /* Count of missing parity devices */
uint64_t rr_firstdatacol; /* First data column/parity count */
abd_t *rr_abd_empty; /* dRAID empty sector buffer */
int rr_nempty; /* empty sectors included in parity */
#ifdef ZFS_DEBUG
uint64_t rr_offset; /* Logical offset for *_io_verify() */
uint64_t rr_size; /* Physical size for *_io_verify() */
#endif
raidz_col_t rr_col[0]; /* Flexible array of I/O columns */
} raidz_row_t;
typedef struct raidz_map {
boolean_t rm_ecksuminjected; /* checksum error was injected */
int rm_nrows; /* Regular row count */
int rm_nskip; /* RAIDZ sectors skipped for padding */
int rm_skipstart; /* Column index of padding start */
const raidz_impl_ops_t *rm_ops; /* RAIDZ math operations */
raidz_row_t *rm_row[0]; /* flexible array of rows */
} raidz_map_t;
#define RAIDZ_ORIGINAL_IMPL (INT_MAX)
extern const raidz_impl_ops_t vdev_raidz_scalar_impl;
extern boolean_t raidz_will_scalar_work(void);
#if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_sse2_impl;
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_ssse3_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX2) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_avx2_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_avx512f_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_avx512bw_impl;
#endif
#if defined(__aarch64__)
extern const raidz_impl_ops_t vdev_raidz_aarch64_neon_impl;
extern const raidz_impl_ops_t vdev_raidz_aarch64_neonx2_impl;
#endif
#if defined(__powerpc__)
extern const raidz_impl_ops_t vdev_raidz_powerpc_altivec_impl;
#endif
/*
* Commonly used raidz_map helpers
*
* raidz_parity Returns parity of the RAIDZ block
* raidz_ncols Returns number of columns the block spans
* Note, all rows have the same number of columns.
* raidz_nbigcols Returns number of big columns
* raidz_col_p Returns pointer to a column
* raidz_col_size Returns size of a column
* raidz_big_size Returns size of big columns
* raidz_short_size Returns size of short columns
*/
#define raidz_parity(rm) ((rm)->rm_row[0]->rr_firstdatacol)
#define raidz_ncols(rm) ((rm)->rm_row[0]->rr_cols)
#define raidz_nbigcols(rm) ((rm)->rm_bigcols)
#define raidz_col_p(rm, c) ((rm)->rm_col + (c))
#define raidz_col_size(rm, c) ((rm)->rm_col[c].rc_size)
#define raidz_big_size(rm) (raidz_col_size(rm, CODE_P))
#define raidz_short_size(rm) (raidz_col_size(rm, raidz_ncols(rm)-1))
/*
* Macro defines a RAIDZ parity generation method
*
* @code parity code the function produces
* @impl name of the implementation
*/
#define _RAIDZ_GEN_WRAP(code, impl) \
static void \
impl ## _gen_ ## code(void *rrp) \
{ \
raidz_row_t *rr = (raidz_row_t *)rrp; \
raidz_generate_## code ## _impl(rr); \
}
/*
* Macro defines a RAIDZ data reconstruction method
*
* @code parity codes the reconstruction method uses
* @impl name of the implementation
*/
#define _RAIDZ_REC_WRAP(code, impl) \
static int \
impl ## _rec_ ## code(void *rrp, const int *tgtidx) \
{ \
raidz_row_t *rr = (raidz_row_t *)rrp; \
return (raidz_reconstruct_## code ## _impl(rr, tgtidx)); \
}
/*
* Define all gen methods for an implementation
*
* @impl name of the implementation
*/
#define DEFINE_GEN_METHODS(impl) \
_RAIDZ_GEN_WRAP(p, impl); \
_RAIDZ_GEN_WRAP(pq, impl); \
_RAIDZ_GEN_WRAP(pqr, impl)
/*
* Define all rec functions for an implementation
*
* @impl name of the implementation
*/
#define DEFINE_REC_METHODS(impl) \
_RAIDZ_REC_WRAP(p, impl); \
_RAIDZ_REC_WRAP(q, impl); \
_RAIDZ_REC_WRAP(r, impl); \
_RAIDZ_REC_WRAP(pq, impl); \
_RAIDZ_REC_WRAP(pr, impl); \
_RAIDZ_REC_WRAP(qr, impl); \
_RAIDZ_REC_WRAP(pqr, impl)
#define RAIDZ_GEN_METHODS(impl) \
{ \
[RAIDZ_GEN_P] = & impl ## _gen_p, \
[RAIDZ_GEN_PQ] = & impl ## _gen_pq, \
[RAIDZ_GEN_PQR] = & impl ## _gen_pqr \
}
#define RAIDZ_REC_METHODS(impl) \
{ \
[RAIDZ_REC_P] = & impl ## _rec_p, \
[RAIDZ_REC_Q] = & impl ## _rec_q, \
[RAIDZ_REC_R] = & impl ## _rec_r, \
[RAIDZ_REC_PQ] = & impl ## _rec_pq, \
[RAIDZ_REC_PR] = & impl ## _rec_pr, \
[RAIDZ_REC_QR] = & impl ## _rec_qr, \
[RAIDZ_REC_PQR] = & impl ## _rec_pqr \
}
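/*
 * Illustrative sketch (not part of this header) of how an implementation
 * file ties the wrapper and table macros above together, loosely modeled on
 * the scalar implementation.  "example" is a hypothetical name; the sketch
 * assumes the per-implementation kernels (raidz_generate_p_impl() and
 * friends) have already been instantiated, e.g. by including
 * vdev_raidz_math_impl.h with the appropriate definitions.
 */
DEFINE_GEN_METHODS(example);
DEFINE_REC_METHODS(example);

static boolean_t
example_will_work(void)
{
	/* Report whether the CPU supports this implementation. */
	return (B_TRUE);
}

const raidz_impl_ops_t vdev_raidz_example_impl = {
	.init = NULL,
	.fini = NULL,
	.gen = RAIDZ_GEN_METHODS(example),
	.rec = RAIDZ_REC_METHODS(example),
	.is_supported = &example_will_work,
	.name = "example"
};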
typedef struct raidz_impl_kstat {
uint64_t gen[RAIDZ_GEN_NUM]; /* gen method speed B/s */
uint64_t rec[RAIDZ_REC_NUM]; /* rec method speed B/s */
} raidz_impl_kstat_t;
/*
* Enumerate various multiplication constants
* used in reconstruction methods
*/
typedef enum raidz_mul_info {
/* Reconstruct Q */
MUL_Q_X = 0,
/* Reconstruct R */
MUL_R_X = 0,
/* Reconstruct PQ */
MUL_PQ_X = 0,
MUL_PQ_Y = 1,
/* Reconstruct PR */
MUL_PR_X = 0,
MUL_PR_Y = 1,
/* Reconstruct QR */
MUL_QR_XQ = 0,
MUL_QR_X = 1,
MUL_QR_YQ = 2,
MUL_QR_Y = 3,
/* Reconstruct PQR */
MUL_PQR_XP = 0,
MUL_PQR_XQ = 1,
MUL_PQR_XR = 2,
MUL_PQR_YU = 3,
MUL_PQR_YP = 4,
MUL_PQR_YQ = 5,
MUL_CNT = 6
} raidz_mul_info_t;
/*
* Powers of 2 in the Galois field.
*/
extern const uint8_t vdev_raidz_pow2[256] __attribute__((aligned(256)));
/* Logs of 2 in the Galois field defined above. */
extern const uint8_t vdev_raidz_log2[256] __attribute__((aligned(256)));
/*
* Multiply a given number by 2 raised to the given power.
*/
static inline uint8_t
vdev_raidz_exp2(const uint8_t a, const unsigned exp)
{
if (a == 0)
return (0);
return (vdev_raidz_pow2[(exp + (unsigned)vdev_raidz_log2[a]) % 255]);
}
/*
* Galois Field operations.
*
* gf_exp2 - computes 2 raised to the given power
- * gf_exp2 - computes 4 raised to the given power
+ * gf_exp4 - computes 4 raised to the given power
* gf_mul - multiplication
* gf_div - division
* gf_inv - multiplicative inverse
*/
typedef unsigned gf_t;
typedef unsigned gf_log_t;
static inline gf_t
gf_mul(const gf_t a, const gf_t b)
{
gf_log_t logsum;
if (a == 0 || b == 0)
return (0);
logsum = (gf_log_t)vdev_raidz_log2[a] + (gf_log_t)vdev_raidz_log2[b];
return ((gf_t)vdev_raidz_pow2[logsum % 255]);
}
static inline gf_t
gf_div(const gf_t a, const gf_t b)
{
gf_log_t logsum;
ASSERT3U(b, >, 0);
if (a == 0)
return (0);
logsum = (gf_log_t)255 + (gf_log_t)vdev_raidz_log2[a] -
(gf_log_t)vdev_raidz_log2[b];
return ((gf_t)vdev_raidz_pow2[logsum % 255]);
}
static inline gf_t
gf_inv(const gf_t a)
{
gf_log_t logsum;
ASSERT3U(a, >, 0);
logsum = (gf_log_t)255 - (gf_log_t)vdev_raidz_log2[a];
return ((gf_t)vdev_raidz_pow2[logsum]);
}
static inline gf_t
gf_exp2(gf_log_t exp)
{
return (vdev_raidz_pow2[exp % 255]);
}
static inline gf_t
gf_exp4(gf_log_t exp)
{
ASSERT3U(exp, <=, 255);
return ((gf_t)vdev_raidz_pow2[(2 * exp) % 255]);
}
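/*
 * Illustrative self-check (a sketch, not part of this header) of the GF(2^8)
 * helpers above; the function name is hypothetical.
 */
static inline void
gf_selfcheck(void)
{
	for (unsigned a = 1; a < 256; a++) {
		ASSERT3U(gf_mul(a, gf_inv(a)), ==, 1);	/* a * a^-1 == 1 */
		ASSERT3U(gf_div(a, a), ==, 1);		/* a / a == 1 */
		ASSERT3U(gf_mul(a, 1), ==, a);		/* 1 is the identity */
	}
}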
#ifdef __cplusplus
}
#endif
#endif /* _VDEV_RAIDZ_H */
diff --git a/sys/contrib/openzfs/include/sys/zfs_context.h b/sys/contrib/openzfs/include/sys/zfs_context.h
index aa4f78789631..83ed97fbec7f 100644
--- a/sys/contrib/openzfs/include/sys/zfs_context.h
+++ b/sys/contrib/openzfs/include/sys/zfs_context.h
@@ -1,777 +1,778 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
#ifndef _SYS_ZFS_CONTEXT_H
#define _SYS_ZFS_CONTEXT_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* This code compiles in three different contexts. When __KERNEL__ is defined,
* the code uses "unix-like" kernel interfaces. When _STANDALONE is defined, the
* code is running in a reduced capacity environment of the boot loader which is
* generally a subset of both POSIX and kernel interfaces (with a few unique
* interfaces too). When neither are defined, it's in a userland POSIX or
* similar environment.
*/
#if defined(__KERNEL__) || defined(_STANDALONE)
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <sys/vmsystm.h>
#include <sys/condvar.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/param.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/random.h>
#include <sys/string.h>
#include <sys/byteorder.h>
#include <sys/list.h>
#include <sys/time.h>
#include <sys/zone.h>
#include <sys/kstat.h>
#include <sys/zfs_debug.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/zfs_delay.h>
#include <sys/sunddi.h>
#include <sys/ctype.h>
#include <sys/disp.h>
#include <sys/trace.h>
#include <sys/procfs_list.h>
#include <sys/mod.h>
#include <sys/uio_impl.h>
#include <sys/zfs_context_os.h>
#else /* _KERNEL || _STANDALONE */
#define _SYS_MUTEX_H
#define _SYS_RWLOCK_H
#define _SYS_CONDVAR_H
#define _SYS_VNODE_H
#define _SYS_VFS_H
#define _SYS_SUNDDI_H
#define _SYS_CALLB_H
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <setjmp.h>
#include <assert.h>
#include <umem.h>
#include <limits.h>
#include <atomic.h>
#include <dirent.h>
#include <time.h>
#include <ctype.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/cred.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/byteorder.h>
#include <sys/list.h>
#include <sys/mod.h>
#include <sys/uio.h>
#include <sys/zfs_debug.h>
#include <sys/kstat.h>
#include <sys/u8_textprep.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/utsname.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_context_os.h>
/*
* Stack
*/
#define noinline __attribute__((noinline))
#define likely(x) __builtin_expect((x), 1)
#define unlikely(x) __builtin_expect((x), 0)
/*
* Debugging
*/
/*
* Note that we are not using the debugging levels.
*/
#define CE_CONT 0 /* continuation */
#define CE_NOTE 1 /* notice */
#define CE_WARN 2 /* warning */
#define CE_PANIC 3 /* panic */
#define CE_IGNORE 4 /* print nothing */
/*
* ZFS debugging
*/
extern void dprintf_setup(int *argc, char **argv);
extern void cmn_err(int, const char *, ...);
extern void vcmn_err(int, const char *, va_list);
extern __attribute__((noreturn)) void panic(const char *, ...);
extern __attribute__((noreturn)) void vpanic(const char *, va_list);
#define fm_panic panic
/*
* DTrace SDT probes have different signatures in userland than they do in
* the kernel. If they're being used in kernel code, re-define them out of
* existence for their counterparts in libzpool.
*
* Here's an example of how to use the set-error probes in userland:
* zfs$target:::set-error /arg0 == EBUSY/ {stack();}
*
* Here's an example of how to use DTRACE_PROBE probes in userland:
* If there is a probe declared as follows:
* DTRACE_PROBE2(zfs__probe_name, uint64_t, blkid, dnode_t *, dn);
* Then you can use it as follows:
* zfs$target:::probe2 /copyinstr(arg0) == "zfs__probe_name"/
* {printf("%u %p\n", arg1, arg2);}
*/
#ifdef DTRACE_PROBE
#undef DTRACE_PROBE
#endif /* DTRACE_PROBE */
#define DTRACE_PROBE(a)
#ifdef DTRACE_PROBE1
#undef DTRACE_PROBE1
#endif /* DTRACE_PROBE1 */
#define DTRACE_PROBE1(a, b, c)
#ifdef DTRACE_PROBE2
#undef DTRACE_PROBE2
#endif /* DTRACE_PROBE2 */
#define DTRACE_PROBE2(a, b, c, d, e)
#ifdef DTRACE_PROBE3
#undef DTRACE_PROBE3
#endif /* DTRACE_PROBE3 */
#define DTRACE_PROBE3(a, b, c, d, e, f, g)
#ifdef DTRACE_PROBE4
#undef DTRACE_PROBE4
#endif /* DTRACE_PROBE4 */
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i)
/*
* Tunables.
*/
typedef struct zfs_kernel_param {
const char *name; /* unused stub */
} zfs_kernel_param_t;
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc)
#define ZFS_MODULE_PARAM_ARGS void
#define ZFS_MODULE_PARAM_CALL(scope_prefix, name_prefix, name, setfunc, \
getfunc, perm, desc)
/*
* Threads.
*/
typedef pthread_t kthread_t;
#define TS_RUN 0x00000002
#define TS_JOINABLE 0x00000004
#define curthread ((void *)(uintptr_t)pthread_self())
-#define kpreempt(x) yield()
#define getcomm() "unknown"
#define thread_create_named(name, stk, stksize, func, arg, len, \
pp, state, pri) \
zk_thread_create(func, arg, stksize, state)
#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
zk_thread_create(func, arg, stksize, state)
#define thread_exit() pthread_exit(NULL)
#define thread_join(t) pthread_join((pthread_t)(t), NULL)
#define newproc(f, a, cid, pri, ctp, pid) (ENOSYS)
/* in libzpool, p0 exists only to have its address taken */
typedef struct proc {
uintptr_t this_is_never_used_dont_dereference_it;
} proc_t;
extern struct proc p0;
#define curproc (&p0)
#define PS_NONE -1
extern kthread_t *zk_thread_create(void (*func)(void *), void *arg,
size_t stksize, int state);
#define issig(why) (FALSE)
#define ISSIG(thr, why) (FALSE)
+#define KPREEMPT_SYNC (-1)
+
+#define kpreempt(x) sched_yield()
#define kpreempt_disable() ((void)0)
#define kpreempt_enable() ((void)0)
-#define cond_resched() sched_yield()
/*
* Mutexes
*/
typedef struct kmutex {
pthread_mutex_t m_lock;
pthread_t m_owner;
} kmutex_t;
#define MUTEX_DEFAULT 0
#define MUTEX_NOLOCKDEP MUTEX_DEFAULT
#define MUTEX_HELD(mp) pthread_equal((mp)->m_owner, pthread_self())
#define MUTEX_NOT_HELD(mp) !MUTEX_HELD(mp)
extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
extern void mutex_destroy(kmutex_t *mp);
extern void mutex_enter(kmutex_t *mp);
extern void mutex_exit(kmutex_t *mp);
extern int mutex_tryenter(kmutex_t *mp);
#define NESTED_SINGLE 1
#define mutex_enter_nested(mp, class) mutex_enter(mp)
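/*
 * Illustrative sketch (not part of this header): typical use of the userland
 * kmutex_t emulation declared above from a libzpool consumer.  The names
 * example_lock, example_count and example_bump are hypothetical.
 */
static kmutex_t example_lock;
static uint64_t example_count;

static inline void
example_bump(void)
{
	/* mutex_init(&example_lock, NULL, MUTEX_DEFAULT, NULL) done once */
	mutex_enter(&example_lock);
	ASSERT(MUTEX_HELD(&example_lock));
	example_count++;
	mutex_exit(&example_lock);
}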
/*
* RW locks
*/
typedef struct krwlock {
pthread_rwlock_t rw_lock;
pthread_t rw_owner;
uint_t rw_readers;
} krwlock_t;
typedef int krw_t;
#define RW_READER 0
#define RW_WRITER 1
#define RW_DEFAULT RW_READER
#define RW_NOLOCKDEP RW_READER
#define RW_READ_HELD(rw) ((rw)->rw_readers > 0)
#define RW_WRITE_HELD(rw) pthread_equal((rw)->rw_owner, pthread_self())
#define RW_LOCK_HELD(rw) (RW_READ_HELD(rw) || RW_WRITE_HELD(rw))
extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
extern void rw_destroy(krwlock_t *rwlp);
extern void rw_enter(krwlock_t *rwlp, krw_t rw);
extern int rw_tryenter(krwlock_t *rwlp, krw_t rw);
extern int rw_tryupgrade(krwlock_t *rwlp);
extern void rw_exit(krwlock_t *rwlp);
#define rw_downgrade(rwlp) do { } while (0)
/*
* Credentials
*/
extern uid_t crgetuid(cred_t *cr);
extern uid_t crgetruid(cred_t *cr);
extern gid_t crgetgid(cred_t *cr);
extern int crgetngroups(cred_t *cr);
extern gid_t *crgetgroups(cred_t *cr);
/*
* Condition variables
*/
typedef pthread_cond_t kcondvar_t;
#define CV_DEFAULT 0
#define CALLOUT_FLAG_ABSOLUTE 0x2
extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
extern void cv_destroy(kcondvar_t *cv);
extern void cv_wait(kcondvar_t *cv, kmutex_t *mp);
extern int cv_wait_sig(kcondvar_t *cv, kmutex_t *mp);
extern int cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime);
extern int cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag);
extern void cv_signal(kcondvar_t *cv);
extern void cv_broadcast(kcondvar_t *cv);
#define cv_timedwait_io(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_timedwait_idle(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_timedwait_sig(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_wait_io(cv, mp) cv_wait(cv, mp)
#define cv_wait_idle(cv, mp) cv_wait(cv, mp)
#define cv_wait_io_sig(cv, mp) cv_wait_sig(cv, mp)
#define cv_timedwait_sig_hires(cv, mp, t, r, f) \
cv_timedwait_hires(cv, mp, t, r, f)
#define cv_timedwait_idle_hires(cv, mp, t, r, f) \
cv_timedwait_hires(cv, mp, t, r, f)
/*
* Thread-specific data
*/
#define tsd_get(k) pthread_getspecific(k)
#define tsd_set(k, v) pthread_setspecific(k, v)
#define tsd_create(kp, d) pthread_key_create((pthread_key_t *)kp, d)
#define tsd_destroy(kp) /* nothing */
#ifdef __FreeBSD__
typedef off_t loff_t;
#endif
/*
* kstat creation, installation and deletion
*/
extern kstat_t *kstat_create(const char *, int,
const char *, const char *, uchar_t, ulong_t, uchar_t);
extern void kstat_install(kstat_t *);
extern void kstat_delete(kstat_t *);
extern void kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index));
/*
* procfs list manipulation
*/
typedef struct procfs_list {
void *pl_private;
kmutex_t pl_lock;
list_t pl_list;
uint64_t pl_next_id;
size_t pl_node_offset;
} procfs_list_t;
#ifndef __cplusplus
struct seq_file { };
void seq_printf(struct seq_file *m, const char *fmt, ...);
typedef struct procfs_list_node {
list_node_t pln_link;
uint64_t pln_id;
} procfs_list_node_t;
void procfs_list_install(const char *module,
const char *submodule,
const char *name,
mode_t mode,
procfs_list_t *procfs_list,
int (*show)(struct seq_file *f, void *p),
int (*show_header)(struct seq_file *f),
int (*clear)(procfs_list_t *procfs_list),
size_t procfs_list_node_off);
void procfs_list_uninstall(procfs_list_t *procfs_list);
void procfs_list_destroy(procfs_list_t *procfs_list);
void procfs_list_add(procfs_list_t *procfs_list, void *p);
#endif
/*
* Kernel memory
*/
#define KM_SLEEP UMEM_NOFAIL
#define KM_PUSHPAGE KM_SLEEP
#define KM_NOSLEEP UMEM_DEFAULT
#define KM_NORMALPRI 0 /* not needed with UMEM_DEFAULT */
#define KMC_NODEBUG UMC_NODEBUG
#define KMC_KVMEM 0x0
#define kmem_alloc(_s, _f) umem_alloc(_s, _f)
#define kmem_zalloc(_s, _f) umem_zalloc(_s, _f)
#define kmem_free(_b, _s) umem_free(_b, _s)
#define vmem_alloc(_s, _f) kmem_alloc(_s, _f)
#define vmem_zalloc(_s, _f) kmem_zalloc(_s, _f)
#define vmem_free(_b, _s) kmem_free(_b, _s)
#define kmem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) \
umem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i)
#define kmem_cache_destroy(_c) umem_cache_destroy(_c)
#define kmem_cache_alloc(_c, _f) umem_cache_alloc(_c, _f)
#define kmem_cache_free(_c, _b) umem_cache_free(_c, _b)
#define kmem_debugging() 0
#define kmem_cache_reap_now(_c) umem_cache_reap_now(_c);
#define kmem_cache_set_move(_c, _cb) /* nothing */
#define POINTER_INVALIDATE(_pp) /* nothing */
#define POINTER_IS_VALID(_p) 0
typedef umem_cache_t kmem_cache_t;
typedef enum kmem_cbrc {
KMEM_CBRC_YES,
KMEM_CBRC_NO,
KMEM_CBRC_LATER,
KMEM_CBRC_DONT_NEED,
KMEM_CBRC_DONT_KNOW
} kmem_cbrc_t;
/*
* Task queues
*/
#define TASKQ_NAMELEN 31
typedef uintptr_t taskqid_t;
typedef void (task_func_t)(void *);
typedef struct taskq_ent {
struct taskq_ent *tqent_next;
struct taskq_ent *tqent_prev;
task_func_t *tqent_func;
void *tqent_arg;
uintptr_t tqent_flags;
} taskq_ent_t;
typedef struct taskq {
char tq_name[TASKQ_NAMELEN + 1];
kmutex_t tq_lock;
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
kthread_t **tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
int tq_nalloc;
int tq_minalloc;
int tq_maxalloc;
kcondvar_t tq_maxalloc_cv;
int tq_maxalloc_wait;
taskq_ent_t *tq_freelist;
taskq_ent_t tq_task;
} taskq_t;
#define TQENT_FLAG_PREALLOC 0x1 /* taskq_dispatch_ent used */
#define TASKQ_PREPOPULATE 0x0001
#define TASKQ_CPR_SAFE 0x0002 /* Use CPR safe protocol */
#define TASKQ_DYNAMIC 0x0004 /* Use dynamic thread scheduling */
#define TASKQ_THREADS_CPU_PCT 0x0008 /* Scale # threads by # cpus */
#define TASKQ_DC_BATCH 0x0010 /* Mark threads as batch */
#define TQ_SLEEP KM_SLEEP /* Can block for memory */
#define TQ_NOSLEEP KM_NOSLEEP /* cannot block for memory; may fail */
#define TQ_NOQUEUE 0x02 /* Do not enqueue if can't dispatch */
#define TQ_FRONT 0x08 /* Queue in front */
#define TASKQID_INVALID ((taskqid_t)0)
extern taskq_t *system_taskq;
extern taskq_t *system_delay_taskq;
extern taskq_t *taskq_create(const char *, int, pri_t, int, int, uint_t);
#define taskq_create_proc(a, b, c, d, e, p, f) \
(taskq_create(a, b, c, d, e, f))
#define taskq_create_sysdc(a, b, d, e, p, dc, f) \
((void) sizeof (dc), taskq_create(a, b, maxclsyspri, d, e, f))
extern taskqid_t taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
extern taskqid_t taskq_dispatch_delay(taskq_t *, task_func_t, void *, uint_t,
clock_t);
extern void taskq_dispatch_ent(taskq_t *, task_func_t, void *, uint_t,
taskq_ent_t *);
extern int taskq_empty_ent(taskq_ent_t *);
extern void taskq_init_ent(taskq_ent_t *);
extern void taskq_destroy(taskq_t *);
extern void taskq_wait(taskq_t *);
extern void taskq_wait_id(taskq_t *, taskqid_t);
extern void taskq_wait_outstanding(taskq_t *, taskqid_t);
extern int taskq_member(taskq_t *, kthread_t *);
extern taskq_t *taskq_of_curthread(void);
extern int taskq_cancel_id(taskq_t *, taskqid_t);
extern void system_taskq_init(void);
extern void system_taskq_fini(void);
#define XVA_MAPSIZE 3
#define XVA_MAGIC 0x78766174
extern char *vn_dumpdir;
#define AV_SCANSTAMP_SZ 32 /* length of anti-virus scanstamp */
typedef struct xoptattr {
inode_timespec_t xoa_createtime; /* Create time of file */
uint8_t xoa_archive;
uint8_t xoa_system;
uint8_t xoa_readonly;
uint8_t xoa_hidden;
uint8_t xoa_nounlink;
uint8_t xoa_immutable;
uint8_t xoa_appendonly;
uint8_t xoa_nodump;
uint8_t xoa_settable;
uint8_t xoa_opaque;
uint8_t xoa_av_quarantined;
uint8_t xoa_av_modified;
uint8_t xoa_av_scanstamp[AV_SCANSTAMP_SZ];
uint8_t xoa_reparse;
uint8_t xoa_offline;
uint8_t xoa_sparse;
} xoptattr_t;
typedef struct vattr {
uint_t va_mask; /* bit-mask of attributes */
u_offset_t va_size; /* file size in bytes */
} vattr_t;
typedef struct xvattr {
vattr_t xva_vattr; /* Embedded vattr structure */
uint32_t xva_magic; /* Magic Number */
uint32_t xva_mapsize; /* Size of attr bitmap (32-bit words) */
uint32_t *xva_rtnattrmapp; /* Ptr to xva_rtnattrmap[] */
uint32_t xva_reqattrmap[XVA_MAPSIZE]; /* Requested attrs */
uint32_t xva_rtnattrmap[XVA_MAPSIZE]; /* Returned attrs */
xoptattr_t xva_xoptattrs; /* Optional attributes */
} xvattr_t;
typedef struct vsecattr {
uint_t vsa_mask; /* See below */
int vsa_aclcnt; /* ACL entry count */
void *vsa_aclentp; /* pointer to ACL entries */
int vsa_dfaclcnt; /* default ACL entry count */
void *vsa_dfaclentp; /* pointer to default ACL entries */
size_t vsa_aclentsz; /* ACE size in bytes of vsa_aclentp */
} vsecattr_t;
#define AT_MODE 0x00002
#define AT_UID 0x00004
#define AT_GID 0x00008
#define AT_FSID 0x00010
#define AT_NODEID 0x00020
#define AT_NLINK 0x00040
#define AT_SIZE 0x00080
#define AT_ATIME 0x00100
#define AT_MTIME 0x00200
#define AT_CTIME 0x00400
#define AT_RDEV 0x00800
#define AT_BLKSIZE 0x01000
#define AT_NBLOCKS 0x02000
#define AT_SEQ 0x08000
#define AT_XVATTR 0x10000
#define CRCREAT 0
#define F_FREESP 11
#define FIGNORECASE 0x80000 /* request case-insensitive lookups */
/*
* Random stuff
*/
#define ddi_get_lbolt() (gethrtime() >> 23)
#define ddi_get_lbolt64() (gethrtime() >> 23)
#define hz 119 /* frequency when using gethrtime() >> 23 for lbolt */
#define ddi_time_before(a, b) (a < b)
#define ddi_time_after(a, b) ddi_time_before(b, a)
#define ddi_time_before_eq(a, b) (!ddi_time_after(a, b))
#define ddi_time_after_eq(a, b) ddi_time_before_eq(b, a)
#define ddi_time_before64(a, b) (a < b)
#define ddi_time_after64(a, b) ddi_time_before64(b, a)
#define ddi_time_before_eq64(a, b) (!ddi_time_after64(a, b))
#define ddi_time_after_eq64(a, b) ddi_time_before_eq64(b, a)
extern void delay(clock_t ticks);
#define SEC_TO_TICK(sec) ((sec) * hz)
#define MSEC_TO_TICK(msec) (howmany((hrtime_t)(msec) * hz, MILLISEC))
#define USEC_TO_TICK(usec) (howmany((hrtime_t)(usec) * hz, MICROSEC))
#define NSEC_TO_TICK(nsec) (howmany((hrtime_t)(nsec) * hz, NANOSEC))
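/*
 * Illustrative arithmetic (a sketch, not normative): ddi_get_lbolt() above
 * derives lbolt by shifting gethrtime() right by 23 bits, i.e. one tick per
 * 2^23 = 8,388,608 ns (about 8.4 ms), which is roughly 10^9 / 2^23, or 119.2
 * ticks per second; hence the hz value of 119 used by the tick conversions
 * above.
 */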
#define max_ncpus 64
#define boot_ncpus (sysconf(_SC_NPROCESSORS_ONLN))
/*
* Process priorities as defined by setpriority(2) and getpriority(2).
*/
#define minclsyspri 19
#define maxclsyspri -20
#define defclsyspri 0
#define CPU_SEQID ((uintptr_t)pthread_self() & (max_ncpus - 1))
#define CPU_SEQID_UNSTABLE CPU_SEQID
#define kcred NULL
#define CRED() NULL
#define ptob(x) ((x) * PAGESIZE)
#define NN_DIVISOR_1000 (1U << 0)
#define NN_NUMBUF_SZ (6)
extern uint64_t physmem;
extern const char *random_path;
extern const char *urandom_path;
extern int highbit64(uint64_t i);
extern int lowbit64(uint64_t i);
extern int random_get_bytes(uint8_t *ptr, size_t len);
extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len);
static __inline__ uint32_t
random_in_range(uint32_t range)
{
uint32_t r;
ASSERT(range != 0);
if (range == 1)
return (0);
(void) random_get_pseudo_bytes((uint8_t *)&r, sizeof (r));
return (r % range);
}
extern void kernel_init(int mode);
extern void kernel_fini(void);
extern void random_init(void);
extern void random_fini(void);
struct spa;
extern void show_pool_stats(struct spa *);
extern int set_global_var(char const *arg);
typedef struct callb_cpr {
kmutex_t *cc_lockp;
} callb_cpr_t;
#define CALLB_CPR_INIT(cp, lockp, func, name) { \
(cp)->cc_lockp = lockp; \
}
#define CALLB_CPR_SAFE_BEGIN(cp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
}
#define CALLB_CPR_SAFE_END(cp, lockp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
}
#define CALLB_CPR_EXIT(cp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
mutex_exit((cp)->cc_lockp); \
}
#define zone_dataset_visible(x, y) (1)
#define INGLOBALZONE(z) (1)
extern uint32_t zone_get_hostid(void *zonep);
extern char *kmem_vasprintf(const char *fmt, va_list adx);
extern char *kmem_asprintf(const char *fmt, ...);
#define kmem_strfree(str) kmem_free((str), strlen(str) + 1)
#define kmem_strdup(s) strdup(s)
/*
* Hostname information
*/
extern int ddi_strtoull(const char *str, char **nptr, int base,
u_longlong_t *result);
typedef struct utsname utsname_t;
extern utsname_t *utsname(void);
/* ZFS Boot Related stuff. */
struct _buf {
intptr_t _fd;
};
struct bootstat {
uint64_t st_size;
};
typedef struct ace_object {
uid_t a_who;
uint32_t a_access_mask;
uint16_t a_flags;
uint16_t a_type;
uint8_t a_obj_type[16];
uint8_t a_inherit_obj_type[16];
} ace_object_t;
#define ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
#define ACE_ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
#define ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
#define ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr);
extern int zfs_secpolicy_rename_perms(const char *from, const char *to,
cred_t *cr);
extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr);
extern int secpolicy_zfs(const cred_t *cr);
extern int secpolicy_zfs_proc(const cred_t *cr, proc_t *proc);
extern zoneid_t getzoneid(void);
/* SID stuff */
typedef struct ksiddomain {
uint_t kd_ref;
uint_t kd_len;
char *kd_name;
} ksiddomain_t;
ksiddomain_t *ksid_lookupdomain(const char *);
void ksiddomain_rele(ksiddomain_t *);
#define DDI_SLEEP KM_SLEEP
#define ddi_log_sysevent(_a, _b, _c, _d, _e, _f, _g) \
sysevent_post_event(_c, _d, _b, "libzpool", _e, _f)
#define zfs_sleep_until(wakeup) \
do { \
hrtime_t delta = wakeup - gethrtime(); \
struct timespec ts; \
ts.tv_sec = delta / NANOSEC; \
ts.tv_nsec = delta % NANOSEC; \
(void) nanosleep(&ts, NULL); \
} while (0)
typedef int fstrans_cookie_t;
extern fstrans_cookie_t spl_fstrans_mark(void);
extern void spl_fstrans_unmark(fstrans_cookie_t);
extern int __spl_pf_fstrans_check(void);
extern int kmem_cache_reap_active(void);
/*
* Kernel modules
*/
#define __init
#define __exit
#endif /* _KERNEL || _STANDALONE */
#ifdef __cplusplus
};
#endif
#endif /* _SYS_ZFS_CONTEXT_H */
diff --git a/sys/contrib/openzfs/include/sys/zfs_znode.h b/sys/contrib/openzfs/include/sys/zfs_znode.h
index b223c4b3b30b..7c906050bc47 100644
--- a/sys/contrib/openzfs/include/sys/zfs_znode.h
+++ b/sys/contrib/openzfs/include/sys/zfs_znode.h
@@ -1,301 +1,324 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_ZNODE_H
#define _SYS_FS_ZFS_ZNODE_H
#include <sys/zfs_acl.h>
#include <sys/zil.h>
#include <sys/zfs_project.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Additional file level attributes, that are stored
* in the upper half of z_pflags
*/
#define ZFS_READONLY 0x0000000100000000ull
#define ZFS_HIDDEN 0x0000000200000000ull
#define ZFS_SYSTEM 0x0000000400000000ull
#define ZFS_ARCHIVE 0x0000000800000000ull
#define ZFS_IMMUTABLE 0x0000001000000000ull
#define ZFS_NOUNLINK 0x0000002000000000ull
#define ZFS_APPENDONLY 0x0000004000000000ull
#define ZFS_NODUMP 0x0000008000000000ull
#define ZFS_OPAQUE 0x0000010000000000ull
#define ZFS_AV_QUARANTINED 0x0000020000000000ull
#define ZFS_AV_MODIFIED 0x0000040000000000ull
#define ZFS_REPARSE 0x0000080000000000ull
#define ZFS_OFFLINE 0x0000100000000000ull
#define ZFS_SPARSE 0x0000200000000000ull
/*
* PROJINHERIT attribute is used to indicate that the child object under the
* directory which has the PROJINHERIT attribute needs to inherit its parent
* project ID that is used by project quota.
*/
#define ZFS_PROJINHERIT 0x0000400000000000ull
/*
* PROJID attr is used internally to indicate that the object has project ID.
*/
#define ZFS_PROJID 0x0000800000000000ull
#define ZFS_ATTR_SET(zp, attr, value, pflags, tx) \
{ \
if (value) \
pflags |= attr; \
else \
pflags &= ~attr; \
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(ZTOZSB(zp)), \
&pflags, sizeof (pflags), tx)); \
}
/*
* Define special zfs pflags
*/
#define ZFS_XATTR 0x1 /* is an extended attribute */
#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */
#define ZFS_ACL_TRIVIAL 0x4 /* files ACL is trivial */
#define ZFS_ACL_OBJ_ACE 0x8 /* ACL has CMPLX Object ACE */
#define ZFS_ACL_PROTECTED 0x10 /* ACL protected */
#define ZFS_ACL_DEFAULTED 0x20 /* ACL should be defaulted */
#define ZFS_ACL_AUTO_INHERIT 0x40 /* ACL should be inherited */
#define ZFS_BONUS_SCANSTAMP 0x80 /* Scanstamp in bonus area */
#define ZFS_NO_EXECS_DENIED 0x100 /* exec was given to everyone */
#define SA_ZPL_ATIME(z) z->z_attr_table[ZPL_ATIME]
#define SA_ZPL_MTIME(z) z->z_attr_table[ZPL_MTIME]
#define SA_ZPL_CTIME(z) z->z_attr_table[ZPL_CTIME]
#define SA_ZPL_CRTIME(z) z->z_attr_table[ZPL_CRTIME]
#define SA_ZPL_GEN(z) z->z_attr_table[ZPL_GEN]
#define SA_ZPL_DACL_ACES(z) z->z_attr_table[ZPL_DACL_ACES]
#define SA_ZPL_XATTR(z) z->z_attr_table[ZPL_XATTR]
#define SA_ZPL_SYMLINK(z) z->z_attr_table[ZPL_SYMLINK]
#define SA_ZPL_RDEV(z) z->z_attr_table[ZPL_RDEV]
#define SA_ZPL_SCANSTAMP(z) z->z_attr_table[ZPL_SCANSTAMP]
#define SA_ZPL_UID(z) z->z_attr_table[ZPL_UID]
#define SA_ZPL_GID(z) z->z_attr_table[ZPL_GID]
#define SA_ZPL_PARENT(z) z->z_attr_table[ZPL_PARENT]
#define SA_ZPL_LINKS(z) z->z_attr_table[ZPL_LINKS]
#define SA_ZPL_MODE(z) z->z_attr_table[ZPL_MODE]
#define SA_ZPL_DACL_COUNT(z) z->z_attr_table[ZPL_DACL_COUNT]
#define SA_ZPL_FLAGS(z) z->z_attr_table[ZPL_FLAGS]
#define SA_ZPL_SIZE(z) z->z_attr_table[ZPL_SIZE]
#define SA_ZPL_ZNODE_ACL(z) z->z_attr_table[ZPL_ZNODE_ACL]
#define SA_ZPL_DXATTR(z) z->z_attr_table[ZPL_DXATTR]
#define SA_ZPL_PAD(z) z->z_attr_table[ZPL_PAD]
#define SA_ZPL_PROJID(z) z->z_attr_table[ZPL_PROJID]
/*
* Is ID ephemeral?
*/
#define IS_EPHEMERAL(x) (x > MAXUID)
/*
* Should we use FUIDs?
*/
#define USE_FUIDS(version, os) (version >= ZPL_VERSION_FUID && \
spa_version(dmu_objset_spa(os)) >= SPA_VERSION_FUID)
#define USE_SA(version, os) (version >= ZPL_VERSION_SA && \
spa_version(dmu_objset_spa(os)) >= SPA_VERSION_SA)
#define MASTER_NODE_OBJ 1
/*
* Special attributes for master node.
* "userquota@", "groupquota@" and "projectquota@" are also valid (from
* zfs_userquota_prop_prefixes[]).
*/
#define ZFS_FSID "FSID"
#define ZFS_UNLINKED_SET "DELETE_QUEUE"
#define ZFS_ROOT_OBJ "ROOT"
#define ZPL_VERSION_STR "VERSION"
#define ZFS_FUID_TABLES "FUID"
#define ZFS_SHARES_DIR "SHARES"
#define ZFS_SA_ATTRS "SA_ATTRS"
/*
* Convert mode bits (zp_mode) to BSD-style DT_* values for storing in
* the directory entries. On Linux systems this value is already
* defined correctly as part of the /usr/include/dirent.h header file.
*/
#ifndef IFTODT
#define IFTODT(mode) (((mode) & S_IFMT) >> 12)
#endif
/*
* The directory entry has the type (currently unused on Solaris) in the
* top 4 bits, and the object number in the low 48 bits. The "middle"
* 12 bits are unused.
*/
#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
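/*
 * Illustrative sketch (not part of this header): packing a directory entry
 * value consistent with the layout above; the helper name is hypothetical.
 * dtype must fit in 4 bits and obj in 48 bits.
 */
static inline uint64_t
zfs_dirent_pack_example(uint64_t obj, uint64_t dtype)
{
	/* type in bits 60..63, object number in bits 0..47 */
	return ((dtype << 60) | (obj & ((1ULL << 48) - 1)));
}
/* ZFS_DIRENT_TYPE() and ZFS_DIRENT_OBJ() recover dtype and obj from it. */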
extern int zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len);
#ifdef _KERNEL
#include <sys/zfs_znode_impl.h>
/*
* Directory entry locks control access to directory entries.
* They are used to protect creates, deletes, and renames.
* Each directory znode has a mutex and a list of locked names.
*/
typedef struct zfs_dirlock {
char *dl_name; /* directory entry being locked */
uint32_t dl_sharecnt; /* 0 if exclusive, > 0 if shared */
uint8_t dl_namelock; /* 1 if z_name_lock is NOT held */
uint16_t dl_namesize; /* set if dl_name was allocated */
kcondvar_t dl_cv; /* wait for entry to be unlocked */
struct znode *dl_dzp; /* directory znode */
struct zfs_dirlock *dl_next; /* next in z_dirlocks list */
} zfs_dirlock_t;
typedef struct znode {
uint64_t z_id; /* object ID for this znode */
kmutex_t z_lock; /* znode modification lock */
krwlock_t z_parent_lock; /* parent lock for directories */
krwlock_t z_name_lock; /* "master" lock for dirent locks */
zfs_dirlock_t *z_dirlocks; /* directory entry lock list */
zfs_rangelock_t z_rangelock; /* file range locks */
boolean_t z_unlinked; /* file has been unlinked */
boolean_t z_atime_dirty; /* atime needs to be synced */
boolean_t z_zn_prefetch; /* Prefetch znodes? */
boolean_t z_is_sa; /* are we native sa? */
boolean_t z_is_mapped; /* are we mmap'ed */
boolean_t z_is_ctldir; /* are we .zfs entry */
boolean_t z_is_stale; /* are we stale due to rollback? */
boolean_t z_suspended; /* extra ref from a suspend? */
uint_t z_blksz; /* block size in bytes */
uint_t z_seq; /* modification sequence number */
uint64_t z_mapcnt; /* number of pages mapped to file */
uint64_t z_dnodesize; /* dnode size */
uint64_t z_size; /* file size (cached) */
uint64_t z_pflags; /* pflags (cached) */
uint32_t z_sync_cnt; /* synchronous open count */
uint32_t z_sync_writes_cnt; /* synchronous write count */
uint32_t z_async_writes_cnt; /* asynchronous write count */
mode_t z_mode; /* mode (cached) */
kmutex_t z_acl_lock; /* acl data lock */
zfs_acl_t *z_acl_cached; /* cached acl */
krwlock_t z_xattr_lock; /* xattr data lock */
nvlist_t *z_xattr_cached; /* cached xattrs */
uint64_t z_xattr_parent; /* parent obj for this xattr */
uint64_t z_projid; /* project ID */
list_node_t z_link_node; /* all znodes in fs link */
sa_handle_t *z_sa_hdl; /* handle to sa data */
/*
* Platform specific field, defined by each platform and only
* accessible from platform specific code.
*/
ZNODE_OS_FIELDS;
} znode_t;
+/* Verifies the znode is valid. */
+static inline int
+zfs_verify_zp(znode_t *zp)
+{
+ if (unlikely(zp->z_sa_hdl == NULL))
+ return (SET_ERROR(EIO));
+ return (0);
+}
+
+/* zfs_enter and zfs_verify_zp together */
+static inline int
+zfs_enter_verify_zp(zfsvfs_t *zfsvfs, znode_t *zp, const char *tag)
+{
+ int error;
+ if ((error = zfs_enter(zfsvfs, tag)) != 0)
+ return (error);
+ if ((error = zfs_verify_zp(zp)) != 0) {
+ zfs_exit(zfsvfs, tag);
+ return (error);
+ }
+ return (0);
+}
+
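/*
 * Illustrative sketch (not part of this header): the usual call pattern for
 * the helpers above in a VOP-style entry point.  The function name is
 * hypothetical; ZTOZSB(), FTAG, zfs_enter() and zfs_exit() come from the
 * platform znode/zfsvfs headers.
 */
static inline int
zfs_example_op(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);	/* unmounting, or the znode was torn down */

	/* ... operate on zp while the filesystem is held ... */

	zfs_exit(zfsvfs, FTAG);
	return (0);
}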
typedef struct znode_hold {
uint64_t zh_obj; /* object id */
kmutex_t zh_lock; /* lock serializing object access */
avl_node_t zh_node; /* avl tree linkage */
zfs_refcount_t zh_refcount; /* active consumer reference count */
} znode_hold_t;
static inline uint64_t
zfs_inherit_projid(znode_t *dzp)
{
return ((dzp->z_pflags & ZFS_PROJINHERIT) ? dzp->z_projid :
ZFS_DEFAULT_PROJID);
}
/*
* Timestamp defines
*/
#define ACCESSED (ATTR_ATIME)
#define STATE_CHANGED (ATTR_CTIME)
#define CONTENT_MODIFIED (ATTR_MTIME | ATTR_CTIME)
extern int zfs_init_fs(zfsvfs_t *, znode_t **);
extern void zfs_set_dataprop(objset_t *);
extern void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *,
dmu_tx_t *tx);
extern void zfs_tstamp_update_setup(znode_t *, uint_t, uint64_t [2],
uint64_t [2]);
extern void zfs_grow_blocksize(znode_t *, uint64_t, dmu_tx_t *);
extern int zfs_freesp(znode_t *, uint64_t, uint64_t, int, boolean_t);
extern void zfs_znode_init(void);
extern void zfs_znode_fini(void);
extern int zfs_znode_hold_compare(const void *, const void *);
extern int zfs_zget(zfsvfs_t *, uint64_t, znode_t **);
extern int zfs_rezget(znode_t *);
extern void zfs_zinactive(znode_t *);
extern void zfs_znode_delete(znode_t *, dmu_tx_t *);
extern void zfs_remove_op_tables(void);
extern int zfs_create_op_tables(void);
extern dev_t zfs_cmpldev(uint64_t);
extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value);
extern int zfs_get_stats(objset_t *os, nvlist_t *nv);
extern boolean_t zfs_get_vfs_flag_unmounted(objset_t *os);
extern void zfs_znode_dmu_fini(znode_t *);
extern void zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *,
zfs_fuid_info_t *, vattr_t *vap);
extern int zfs_log_create_txtype(zil_create_t, vsecattr_t *vsecp,
vattr_t *vap);
extern void zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked);
#define ZFS_NO_OBJECT 0 /* no object id */
extern void zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name);
extern void zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name, const char *link);
extern void zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *sdzp, const char *sname, znode_t *tdzp, const char *dname,
znode_t *szp);
extern void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, offset_t off, ssize_t len, int ioflag,
zil_callback_t callback, void *callback_data);
extern void zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, uint64_t off, uint64_t len);
extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp);
extern void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
vsecattr_t *vsecp, zfs_fuid_info_t *fuidp);
extern void zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx);
extern void zfs_upgrade(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
extern void zfs_log_setsaxattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, const char *name, const void *value, size_t size);
extern void zfs_znode_update_vfs(struct znode *);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_ZNODE_H */
diff --git a/sys/contrib/openzfs/lib/libshare/libshare.c b/sys/contrib/openzfs/lib/libshare/libshare.c
index d6257aa1ef30..d50b4550d6d0 100644
--- a/sys/contrib/openzfs/lib/libshare/libshare.c
+++ b/sys/contrib/openzfs/lib/libshare/libshare.c
@@ -1,193 +1,203 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Gunnar Beutner
- * Copyright (c) 2018, 2020 by Delphix. All rights reserved.
+ * Copyright (c) 2018, 2022 by Delphix. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <libintl.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <libzfs.h>
#include <libshare.h>
#include "libshare_impl.h"
#define init_share(zfsname, path, shareopts) \
{ \
.sa_zfsname = zfsname, \
.sa_mountpoint = path, \
.sa_shareopts = shareopts, \
}
#define VALIDATE_PROTOCOL(proto, ...) \
if ((proto) < 0 || (proto) >= SA_PROTOCOL_COUNT) \
return __VA_ARGS__
const char *const sa_protocol_names[SA_PROTOCOL_COUNT] = {
[SA_PROTOCOL_NFS] = "nfs",
[SA_PROTOCOL_SMB] = "smb",
};
static const sa_fstype_t *fstypes[SA_PROTOCOL_COUNT] =
{&libshare_nfs_type, &libshare_smb_type};
int
sa_enable_share(const char *zfsname, const char *mountpoint,
const char *shareopts, enum sa_protocol protocol)
{
VALIDATE_PROTOCOL(protocol, SA_INVALID_PROTOCOL);
const struct sa_share_impl args =
init_share(zfsname, mountpoint, shareopts);
return (fstypes[protocol]->enable_share(&args));
}
int
sa_disable_share(const char *mountpoint, enum sa_protocol protocol)
{
VALIDATE_PROTOCOL(protocol, SA_INVALID_PROTOCOL);
const struct sa_share_impl args = init_share(NULL, mountpoint, NULL);
return (fstypes[protocol]->disable_share(&args));
}
boolean_t
sa_is_shared(const char *mountpoint, enum sa_protocol protocol)
{
VALIDATE_PROTOCOL(protocol, B_FALSE);
const struct sa_share_impl args = init_share(NULL, mountpoint, NULL);
return (fstypes[protocol]->is_shared(&args));
}
void
sa_commit_shares(enum sa_protocol protocol)
{
/* CSTYLED */
VALIDATE_PROTOCOL(protocol, );
fstypes[protocol]->commit_shares();
}
+void
+sa_truncate_shares(enum sa_protocol protocol)
+{
+ /* CSTYLED */
+ VALIDATE_PROTOCOL(protocol, );
+
+ if (fstypes[protocol]->truncate_shares != NULL)
+ fstypes[protocol]->truncate_shares();
+}
+
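/*
 * Illustrative sketch (not part of this file): a consumer that wants to
 * rebuild every export from scratch might clear each protocol's shares
 * before re-enabling and committing them.  The function name and the loop
 * below are hypothetical, not a documented calling convention.
 */
static void
example_reset_all_shares(void)
{
	for (int p = 0; p < SA_PROTOCOL_COUNT; p++)
		sa_truncate_shares((enum sa_protocol)p);
	/* ... sa_enable_share() per dataset, then sa_commit_shares() ... */
}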
int
sa_validate_shareopts(const char *options, enum sa_protocol protocol)
{
VALIDATE_PROTOCOL(protocol, SA_INVALID_PROTOCOL);
return (fstypes[protocol]->validate_shareopts(options));
}
/*
* sa_errorstr(err)
*
* convert an error value to an error string
*/
const char *
sa_errorstr(int err)
{
static char errstr[32];
switch (err) {
case SA_OK:
return (dgettext(TEXT_DOMAIN, "ok"));
case SA_NO_SUCH_PATH:
return (dgettext(TEXT_DOMAIN, "path doesn't exist"));
case SA_NO_MEMORY:
return (dgettext(TEXT_DOMAIN, "no memory"));
case SA_DUPLICATE_NAME:
return (dgettext(TEXT_DOMAIN, "name in use"));
case SA_BAD_PATH:
return (dgettext(TEXT_DOMAIN, "bad path"));
case SA_NO_SUCH_GROUP:
return (dgettext(TEXT_DOMAIN, "no such group"));
case SA_CONFIG_ERR:
return (dgettext(TEXT_DOMAIN, "configuration error"));
case SA_SYSTEM_ERR:
return (dgettext(TEXT_DOMAIN, "system error"));
case SA_SYNTAX_ERR:
return (dgettext(TEXT_DOMAIN, "syntax error"));
case SA_NO_PERMISSION:
return (dgettext(TEXT_DOMAIN, "no permission"));
case SA_BUSY:
return (dgettext(TEXT_DOMAIN, "busy"));
case SA_NO_SUCH_PROP:
return (dgettext(TEXT_DOMAIN, "no such property"));
case SA_INVALID_NAME:
return (dgettext(TEXT_DOMAIN, "invalid name"));
case SA_INVALID_PROTOCOL:
return (dgettext(TEXT_DOMAIN, "invalid protocol"));
case SA_NOT_ALLOWED:
return (dgettext(TEXT_DOMAIN, "operation not allowed"));
case SA_BAD_VALUE:
return (dgettext(TEXT_DOMAIN, "bad property value"));
case SA_INVALID_SECURITY:
return (dgettext(TEXT_DOMAIN, "invalid security type"));
case SA_NO_SUCH_SECURITY:
return (dgettext(TEXT_DOMAIN, "security type not found"));
case SA_VALUE_CONFLICT:
return (dgettext(TEXT_DOMAIN, "property value conflict"));
case SA_NOT_IMPLEMENTED:
return (dgettext(TEXT_DOMAIN, "not implemented"));
case SA_INVALID_PATH:
return (dgettext(TEXT_DOMAIN, "invalid path"));
case SA_NOT_SUPPORTED:
return (dgettext(TEXT_DOMAIN, "operation not supported"));
case SA_PROP_SHARE_ONLY:
return (dgettext(TEXT_DOMAIN, "property not valid for group"));
case SA_NOT_SHARED:
return (dgettext(TEXT_DOMAIN, "not shared"));
case SA_NO_SUCH_RESOURCE:
return (dgettext(TEXT_DOMAIN, "no such resource"));
case SA_RESOURCE_REQUIRED:
return (dgettext(TEXT_DOMAIN, "resource name required"));
case SA_MULTIPLE_ERROR:
return (dgettext(TEXT_DOMAIN,
"errors from multiple protocols"));
case SA_PATH_IS_SUBDIR:
return (dgettext(TEXT_DOMAIN, "path is a subpath of share"));
case SA_PATH_IS_PARENTDIR:
return (dgettext(TEXT_DOMAIN, "path is parent of a share"));
case SA_NO_SECTION:
return (dgettext(TEXT_DOMAIN, "protocol requires a section"));
case SA_NO_PROPERTIES:
return (dgettext(TEXT_DOMAIN, "properties not found"));
case SA_NO_SUCH_SECTION:
return (dgettext(TEXT_DOMAIN, "section not found"));
case SA_PASSWORD_ENC:
return (dgettext(TEXT_DOMAIN, "passwords must be encrypted"));
case SA_SHARE_EXISTS:
return (dgettext(TEXT_DOMAIN,
"path or file is already shared"));
default:
(void) snprintf(errstr, sizeof (errstr),
dgettext(TEXT_DOMAIN, "unknown %d"), err);
return (errstr);
}
}
diff --git a/sys/contrib/openzfs/lib/libshare/libshare_impl.h b/sys/contrib/openzfs/lib/libshare/libshare_impl.h
index b845eb2d8acd..d8c924757fe1 100644
--- a/sys/contrib/openzfs/lib/libshare/libshare_impl.h
+++ b/sys/contrib/openzfs/lib/libshare/libshare_impl.h
@@ -1,46 +1,47 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Gunnar Beutner
- * Copyright (c) 2019, 2020 by Delphix. All rights reserved.
+ * Copyright (c) 2019, 2022 by Delphix. All rights reserved.
*/
#ifndef _LIBSPL_LIBSHARE_IMPL_H
#define _LIBSPL_LIBSHARE_IMPL_H
typedef const struct sa_share_impl {
const char *sa_zfsname;
const char *sa_mountpoint;
const char *sa_shareopts;
} *sa_share_impl_t;
typedef struct {
int (*const enable_share)(sa_share_impl_t share);
int (*const disable_share)(sa_share_impl_t share);
boolean_t (*const is_shared)(sa_share_impl_t share);
int (*const validate_shareopts)(const char *shareopts);
int (*const commit_shares)(void);
+ void (*const truncate_shares)(void);
} sa_fstype_t;
extern const sa_fstype_t libshare_nfs_type, libshare_smb_type;
#endif /* _LIBSPL_LIBSHARE_IMPL_H */
diff --git a/sys/contrib/openzfs/lib/libshare/nfs.c b/sys/contrib/openzfs/lib/libshare/nfs.c
index 161bbfb0ceb9..bbaea93fca5c 100644
--- a/sys/contrib/openzfs/lib/libshare/nfs.c
+++ b/sys/contrib/openzfs/lib/libshare/nfs.c
@@ -1,301 +1,313 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <fcntl.h>
#include <ctype.h>
#include <stdio.h>
#include <errno.h>
#include <libshare.h>
+#include <unistd.h>
#include "nfs.h"
/*
* nfs_exports_[lock|unlock] are used to guard against concurrent
* updates to the exports file. Each protocol is responsible for
* providing the necessary locking to ensure consistency.
*/
static int
nfs_exports_lock(const char *name, int *nfs_lock_fd)
{
int err;
*nfs_lock_fd = open(name, O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (*nfs_lock_fd == -1) {
err = errno;
fprintf(stderr, "failed to lock %s: %s\n", name, strerror(err));
return (err);
}
while ((err = flock(*nfs_lock_fd, LOCK_EX)) != 0 && errno == EINTR)
;
if (err != 0) {
err = errno;
fprintf(stderr, "failed to lock %s: %s\n", name, strerror(err));
(void) close(*nfs_lock_fd);
*nfs_lock_fd = -1;
return (err);
}
return (0);
}
static void
nfs_exports_unlock(const char *name, int *nfs_lock_fd)
{
verify(*nfs_lock_fd > 0);
if (flock(*nfs_lock_fd, LOCK_UN) != 0)
fprintf(stderr, "failed to unlock %s: %s\n",
name, strerror(errno));
(void) close(*nfs_lock_fd);
*nfs_lock_fd = -1;
}
struct tmpfile {
/*
* This only needs to be as wide as ZFS_EXPORTS_FILE plus the mktemp suffix;
* 64 is more than enough.
*/
char name[64];
FILE *fp;
};
static boolean_t
nfs_init_tmpfile(const char *prefix, const char *mdir, struct tmpfile *tmpf)
{
if (mdir != NULL &&
mkdir(mdir, 0755) < 0 &&
errno != EEXIST) {
fprintf(stderr, "failed to create %s: %s\n",
mdir, strerror(errno));
return (B_FALSE);
}
strcpy(tmpf->name, prefix);
strcat(tmpf->name, ".XXXXXXXX");
int fd = mkostemp(tmpf->name, O_CLOEXEC);
if (fd == -1) {
fprintf(stderr, "Unable to create temporary file: %s",
strerror(errno));
return (B_FALSE);
}
tmpf->fp = fdopen(fd, "w+");
if (tmpf->fp == NULL) {
fprintf(stderr, "Unable to reopen temporary file: %s",
strerror(errno));
close(fd);
return (B_FALSE);
}
return (B_TRUE);
}
static void
nfs_abort_tmpfile(struct tmpfile *tmpf)
{
unlink(tmpf->name);
fclose(tmpf->fp);
}
static int
nfs_fini_tmpfile(const char *exports, struct tmpfile *tmpf)
{
if (fflush(tmpf->fp) != 0) {
fprintf(stderr, "Failed to write to temporary file: %s\n",
strerror(errno));
nfs_abort_tmpfile(tmpf);
return (SA_SYSTEM_ERR);
}
if (rename(tmpf->name, exports) == -1) {
fprintf(stderr, "Unable to rename %s -> %s: %s\n",
tmpf->name, exports, strerror(errno));
nfs_abort_tmpfile(tmpf);
return (SA_SYSTEM_ERR);
}
(void) fchmod(fileno(tmpf->fp), 0644);
fclose(tmpf->fp);
return (SA_OK);
}
int
nfs_escape_mountpoint(const char *mp, char **out, boolean_t *need_free)
{
if (strpbrk(mp, "\t\n\v\f\r \\") == NULL) {
*out = (char *)mp;
*need_free = B_FALSE;
return (SA_OK);
} else {
size_t len = strlen(mp);
*out = malloc(len * 4 + 1);
if (!*out)
return (SA_NO_MEMORY);
*need_free = B_TRUE;
char *oc = *out;
for (const char *c = mp; c < mp + len; ++c)
if (memchr("\t\n\v\f\r \\", *c,
strlen("\t\n\v\f\r \\"))) {
sprintf(oc, "\\%03hho", *c);
oc += 4;
} else
*oc++ = *c;
*oc = '\0';
}
return (SA_OK);
}
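/*
 * Illustrative example (not part of this change): nfs_escape_mountpoint()
 * leaves plain paths untouched and octal-escapes whitespace and backslashes,
 * e.g. "/mnt/my share" becomes "/mnt/my\040share" in the exports file.
 */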
static int
nfs_process_exports(const char *exports, const char *mountpoint,
boolean_t (*cbk)(void *userdata, char *line, boolean_t found_mountpoint),
void *userdata)
{
int error = SA_OK;
boolean_t cont = B_TRUE;
FILE *oldfp = fopen(exports, "re");
if (oldfp != NULL) {
boolean_t need_mp_free;
char *mp;
if ((error = nfs_escape_mountpoint(mountpoint,
&mp, &need_mp_free)) != SA_OK) {
(void) fclose(oldfp);
return (error);
}
char *buf = NULL, *sep;
size_t buflen = 0, mplen = strlen(mp);
while (cont && getline(&buf, &buflen, oldfp) != -1) {
if (buf[0] == '\n' || buf[0] == '#')
continue;
cont = cbk(userdata, buf,
(sep = strpbrk(buf, "\t \n")) != NULL &&
sep - buf == mplen &&
strncmp(buf, mp, mplen) == 0);
}
free(buf);
if (need_mp_free)
free(mp);
if (ferror(oldfp) != 0)
error = ferror(oldfp);
if (fclose(oldfp) != 0) {
fprintf(stderr, "Unable to close file %s: %s\n",
exports, strerror(errno));
error = error != SA_OK ? error : SA_SYSTEM_ERR;
}
}
return (error);
}
static boolean_t
nfs_copy_entries_cb(void *userdata, char *line, boolean_t found_mountpoint)
{
FILE *newfp = userdata;
if (!found_mountpoint)
fputs(line, newfp);
return (B_TRUE);
}
/*
* Copy all entries from the exports file (if it exists) to newfp,
* omitting any entries for the specified mountpoint.
*/
static int
nfs_copy_entries(FILE *newfp, const char *exports, const char *mountpoint)
{
fputs(FILE_HEADER, newfp);
int error = nfs_process_exports(
exports, mountpoint, nfs_copy_entries_cb, newfp);
if (error == SA_OK && ferror(newfp) != 0)
error = ferror(newfp);
return (error);
}
int
nfs_toggle_share(const char *lockfile, const char *exports,
const char *expdir, sa_share_impl_t impl_share,
int(*cbk)(sa_share_impl_t impl_share, FILE *tmpfile))
{
int error, nfs_lock_fd = -1;
struct tmpfile tmpf;
if (!nfs_init_tmpfile(exports, expdir, &tmpf))
return (SA_SYSTEM_ERR);
error = nfs_exports_lock(lockfile, &nfs_lock_fd);
if (error != 0) {
nfs_abort_tmpfile(&tmpf);
return (error);
}
error = nfs_copy_entries(tmpf.fp, exports, impl_share->sa_mountpoint);
if (error != SA_OK)
goto fullerr;
error = cbk(impl_share, tmpf.fp);
if (error != SA_OK)
goto fullerr;
error = nfs_fini_tmpfile(exports, &tmpf);
nfs_exports_unlock(lockfile, &nfs_lock_fd);
return (error);
fullerr:
nfs_abort_tmpfile(&tmpf);
nfs_exports_unlock(lockfile, &nfs_lock_fd);
return (error);
}
+void
+nfs_reset_shares(const char *lockfile, const char *exports)
+{
+ int nfs_lock_fd = -1;
+
+ if (nfs_exports_lock(lockfile, &nfs_lock_fd) == 0) {
+ (void) ! truncate(exports, 0);
+ nfs_exports_unlock(lockfile, &nfs_lock_fd);
+ }
+}
+
static boolean_t
nfs_is_shared_cb(void *userdata, char *line, boolean_t found_mountpoint)
{
(void) line;
boolean_t *found = userdata;
*found = found_mountpoint;
return (!found_mountpoint);
}
boolean_t
nfs_is_shared_impl(const char *exports, sa_share_impl_t impl_share)
{
boolean_t found = B_FALSE;
nfs_process_exports(exports, impl_share->sa_mountpoint,
nfs_is_shared_cb, &found);
return (found);
}
diff --git a/sys/contrib/openzfs/lib/libshare/nfs.h b/sys/contrib/openzfs/lib/libshare/nfs.h
index 58523c8f02e2..f4340b18f89c 100644
--- a/sys/contrib/openzfs/lib/libshare/nfs.h
+++ b/sys/contrib/openzfs/lib/libshare/nfs.h
@@ -1,35 +1,37 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Gunnar Beutner
+ * Copyright (c) 2022 by Delphix. All rights reserved.
*/
#include "libshare_impl.h"
#define FILE_HEADER "# !!! DO NOT EDIT THIS FILE MANUALLY !!!\n\n"
int nfs_escape_mountpoint(const char *mp, char **out, boolean_t *need_free);
boolean_t nfs_is_shared_impl(const char *exports, sa_share_impl_t impl_share);
int nfs_toggle_share(const char *lockfile, const char *exports,
const char *expdir, sa_share_impl_t impl_share,
int(*cbk)(sa_share_impl_t impl_share, FILE *tmpfile));
+void nfs_reset_shares(const char *lockfile, const char *exports);
diff --git a/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c b/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c
index 78977a25f4f5..521631c51f07 100644
--- a/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c
+++ b/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c
@@ -1,205 +1,212 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * Copyright (c) 2020 by Delphix. All rights reserved.
+ * Copyright (c) 2020, 2022 by Delphix. All rights reserved.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/vfs.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libintl.h>
#include <libshare.h>
#include "libshare_impl.h"
#include "nfs.h"
#define _PATH_MOUNTDPID "/var/run/mountd.pid"
#define OPTSSIZE 1024
#define MAXLINESIZE (PATH_MAX + OPTSSIZE)
#define ZFS_EXPORTS_FILE "/etc/zfs/exports"
#define ZFS_EXPORTS_LOCK ZFS_EXPORTS_FILE".lock"
/*
* This function translates options to a format accepted by exports(5), e.g.
*
* -ro -network=192.168.0.0 -mask=255.255.255.0 -maproot=0 \
* zfs.freebsd.org 69.147.83.54
*
* Accepted input formats:
*
* ro,network=192.168.0.0,mask=255.255.255.0,maproot=0,zfs.freebsd.org
* ro network=192.168.0.0 mask=255.255.255.0 maproot=0 zfs.freebsd.org
* -ro,-network=192.168.0.0,-mask=255.255.255.0,-maproot=0,zfs.freebsd.org
* -ro -network=192.168.0.0 -mask=255.255.255.0 -maproot=0 \
* zfs.freebsd.org
*
* Recognized keywords:
*
* ro, maproot, mapall, mask, network, sec, alldirs, public, webnfs,
* index, quiet
*/
static int
translate_opts(const char *shareopts, FILE *out)
{
static const char *const known_opts[] = { "ro", "maproot", "mapall",
"mask", "network", "sec", "alldirs", "public", "webnfs", "index",
"quiet" };
char oldopts[OPTSSIZE], newopts[OPTSSIZE];
char *o, *s = NULL;
unsigned int i;
size_t len;
strlcpy(oldopts, shareopts, sizeof (oldopts));
newopts[0] = '\0';
s = oldopts;
while ((o = strsep(&s, "-, ")) != NULL) {
if (o[0] == '\0')
continue;
for (i = 0; i < ARRAY_SIZE(known_opts); ++i) {
len = strlen(known_opts[i]);
if (strncmp(known_opts[i], o, len) == 0 &&
(o[len] == '\0' || o[len] == '=')) {
strlcat(newopts, "-", sizeof (newopts));
break;
}
}
strlcat(newopts, o, sizeof (newopts));
strlcat(newopts, " ", sizeof (newopts));
}
return (fputs(newopts, out));
}
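/*
 * Illustrative example (not part of this change): given the shareopts string
 * "ro,network=192.168.0.0,mask=255.255.255.0,zfs.freebsd.org",
 * translate_opts() writes
 * "-ro -network=192.168.0.0 -mask=255.255.255.0 zfs.freebsd.org "
 * to the exports line, prefixing only the recognized keywords with a dash.
 */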
static int
nfs_enable_share_impl(sa_share_impl_t impl_share, FILE *tmpfile)
{
const char *shareopts = impl_share->sa_shareopts;
if (strcmp(shareopts, "on") == 0)
shareopts = "";
boolean_t need_free;
char *mp;
int rc = nfs_escape_mountpoint(impl_share->sa_mountpoint, &mp,
&need_free);
if (rc != SA_OK)
return (rc);
if (fputs(mp, tmpfile) == EOF ||
fputc('\t', tmpfile) == EOF ||
translate_opts(shareopts, tmpfile) == EOF ||
fputc('\n', tmpfile) == EOF) {
fprintf(stderr, "failed to write to temporary file\n");
rc = SA_SYSTEM_ERR;
}
if (need_free)
free(mp);
return (rc);
}
static int
nfs_enable_share(sa_share_impl_t impl_share)
{
return (nfs_toggle_share(
ZFS_EXPORTS_LOCK, ZFS_EXPORTS_FILE, NULL, impl_share,
nfs_enable_share_impl));
}
static int
nfs_disable_share_impl(sa_share_impl_t impl_share, FILE *tmpfile)
{
(void) impl_share, (void) tmpfile;
return (SA_OK);
}
static int
nfs_disable_share(sa_share_impl_t impl_share)
{
return (nfs_toggle_share(
ZFS_EXPORTS_LOCK, ZFS_EXPORTS_FILE, NULL, impl_share,
nfs_disable_share_impl));
}
static boolean_t
nfs_is_shared(sa_share_impl_t impl_share)
{
return (nfs_is_shared_impl(ZFS_EXPORTS_FILE, impl_share));
}
static int
nfs_validate_shareopts(const char *shareopts)
{
(void) shareopts;
return (SA_OK);
}
/*
* Commit the shares by restarting mountd.
*/
static int
nfs_commit_shares(void)
{
struct pidfh *pfh;
pid_t mountdpid;
start:
pfh = pidfile_open(_PATH_MOUNTDPID, 0600, &mountdpid);
if (pfh != NULL) {
/* mountd(8) is not running. */
pidfile_remove(pfh);
return (SA_OK);
}
if (errno != EEXIST) {
/* Cannot open pidfile for some reason. */
return (SA_SYSTEM_ERR);
}
if (mountdpid == -1) {
/* mountd(8) exists, but didn't write the PID yet */
usleep(500);
goto start;
}
/* We have mountd(8) PID in mountdpid variable. */
kill(mountdpid, SIGHUP);
return (SA_OK);
}
+static void
+nfs_truncate_shares(void)
+{
+ nfs_reset_shares(ZFS_EXPORTS_LOCK, ZFS_EXPORTS_FILE);
+}
+
const sa_fstype_t libshare_nfs_type = {
.enable_share = nfs_enable_share,
.disable_share = nfs_disable_share,
.is_shared = nfs_is_shared,
.validate_shareopts = nfs_validate_shareopts,
.commit_shares = nfs_commit_shares,
+ .truncate_shares = nfs_truncate_shares,
};
diff --git a/sys/contrib/openzfs/lib/libshare/os/linux/nfs.c b/sys/contrib/openzfs/lib/libshare/os/linux/nfs.c
index 0870f37e5818..c27e5564c1e1 100644
--- a/sys/contrib/openzfs/lib/libshare/os/linux/nfs.c
+++ b/sys/contrib/openzfs/lib/libshare/os/linux/nfs.c
@@ -1,520 +1,527 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Gunnar Beutner
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
- * Copyright (c) 2019, 2020 by Delphix. All rights reserved.
+ * Copyright (c) 2019, 2022 by Delphix. All rights reserved.
*/
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <libzfs.h>
#include <libshare.h>
#include "libshare_impl.h"
#include "nfs.h"
#define ZFS_EXPORTS_DIR "/etc/exports.d"
#define ZFS_EXPORTS_FILE ZFS_EXPORTS_DIR"/zfs.exports"
#define ZFS_EXPORTS_LOCK ZFS_EXPORTS_FILE".lock"
static boolean_t nfs_available(void);
typedef int (*nfs_shareopt_callback_t)(const char *opt, const char *value,
void *cookie);
typedef int (*nfs_host_callback_t)(FILE *tmpfile, const char *sharepath,
const char *host, const char *security, const char *access, void *cookie);
/*
* Invokes the specified callback function for each Solaris share option
* listed in the specified string.
*/
static int
foreach_nfs_shareopt(const char *shareopts,
nfs_shareopt_callback_t callback, void *cookie)
{
char *shareopts_dup, *opt, *cur, *value;
int was_nul, error;
if (shareopts == NULL)
return (SA_OK);
if (strcmp(shareopts, "on") == 0)
shareopts = "rw,crossmnt";
shareopts_dup = strdup(shareopts);
if (shareopts_dup == NULL)
return (SA_NO_MEMORY);
opt = shareopts_dup;
was_nul = 0;
while (1) {
cur = opt;
while (*cur != ',' && *cur != '\0')
cur++;
if (*cur == '\0')
was_nul = 1;
*cur = '\0';
if (cur > opt) {
value = strchr(opt, '=');
if (value != NULL) {
*value = '\0';
value++;
}
error = callback(opt, value, cookie);
if (error != SA_OK) {
free(shareopts_dup);
return (error);
}
}
opt = cur + 1;
if (was_nul)
break;
}
free(shareopts_dup);
return (SA_OK);
}
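/*
 * Illustrative example (not part of this change): for the shareopts string
 * "rw=@192.168.0.0/16,crossmnt,anon=0" the callback is invoked three times,
 * as ("rw", "@192.168.0.0/16"), ("crossmnt", NULL) and ("anon", "0").
 */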
typedef struct nfs_host_cookie_s {
nfs_host_callback_t callback;
const char *sharepath;
void *cookie;
FILE *tmpfile;
const char *security;
} nfs_host_cookie_t;
/*
* Helper function for foreach_nfs_host. This function checks whether the
* current share option is a host specification and invokes a callback
* function with information about the host.
*/
static int
foreach_nfs_host_cb(const char *opt, const char *value, void *pcookie)
{
int error;
const char *access;
char *host_dup, *host, *next, *v6Literal;
nfs_host_cookie_t *udata = (nfs_host_cookie_t *)pcookie;
int cidr_len;
#ifdef DEBUG
fprintf(stderr, "foreach_nfs_host_cb: key=%s, value=%s\n", opt, value);
#endif
if (strcmp(opt, "sec") == 0)
udata->security = value;
if (strcmp(opt, "rw") == 0 || strcmp(opt, "ro") == 0) {
if (value == NULL)
value = "*";
access = opt;
host_dup = strdup(value);
if (host_dup == NULL)
return (SA_NO_MEMORY);
host = host_dup;
do {
if (*host == '[') {
host++;
v6Literal = strchr(host, ']');
if (v6Literal == NULL) {
free(host_dup);
return (SA_SYNTAX_ERR);
}
if (v6Literal[1] == '\0') {
*v6Literal = '\0';
next = NULL;
} else if (v6Literal[1] == '/') {
next = strchr(v6Literal + 2, ':');
if (next == NULL) {
cidr_len =
strlen(v6Literal + 1);
memmove(v6Literal,
v6Literal + 1,
cidr_len);
v6Literal[cidr_len] = '\0';
} else {
cidr_len = next - v6Literal - 1;
memmove(v6Literal,
v6Literal + 1,
cidr_len);
v6Literal[cidr_len] = '\0';
next++;
}
} else if (v6Literal[1] == ':') {
*v6Literal = '\0';
next = v6Literal + 2;
} else {
free(host_dup);
return (SA_SYNTAX_ERR);
}
} else {
next = strchr(host, ':');
if (next != NULL) {
*next = '\0';
next++;
}
}
error = udata->callback(udata->tmpfile,
udata->sharepath, host, udata->security,
access, udata->cookie);
if (error != SA_OK) {
free(host_dup);
return (error);
}
host = next;
} while (host != NULL);
free(host_dup);
}
return (SA_OK);
}
/*
* Invokes a callback function for all NFS hosts that are set for a share.
*/
static int
foreach_nfs_host(sa_share_impl_t impl_share, FILE *tmpfile,
nfs_host_callback_t callback, void *cookie)
{
nfs_host_cookie_t udata;
udata.callback = callback;
udata.sharepath = impl_share->sa_mountpoint;
udata.cookie = cookie;
udata.tmpfile = tmpfile;
udata.security = "sys";
return (foreach_nfs_shareopt(impl_share->sa_shareopts,
foreach_nfs_host_cb, &udata));
}
/*
* Converts a Solaris NFS host specification to its Linux equivalent.
*/
static const char *
get_linux_hostspec(const char *solaris_hostspec)
{
/*
* For now we just support CIDR masks (e.g. @192.168.0.0/16) and host
* wildcards (e.g. *.example.org).
*/
if (solaris_hostspec[0] == '@') {
/*
* Solaris host specifier, e.g. @192.168.0.0/16; we just need
* to skip the @ in this case
*/
return (solaris_hostspec + 1);
} else {
return (solaris_hostspec);
}
}
/*
* Adds a Linux share option to an array of NFS options.
*/
static int
add_linux_shareopt(char **plinux_opts, const char *key, const char *value)
{
size_t len = 0;
char *new_linux_opts;
if (*plinux_opts != NULL)
len = strlen(*plinux_opts);
new_linux_opts = realloc(*plinux_opts, len + 1 + strlen(key) +
(value ? 1 + strlen(value) : 0) + 1);
if (new_linux_opts == NULL)
return (SA_NO_MEMORY);
new_linux_opts[len] = '\0';
if (len > 0)
strcat(new_linux_opts, ",");
strcat(new_linux_opts, key);
if (value != NULL) {
strcat(new_linux_opts, "=");
strcat(new_linux_opts, value);
}
*plinux_opts = new_linux_opts;
return (SA_OK);
}
static int string_cmp(const void *lhs, const void *rhs) {
const char *const *l = lhs, *const *r = rhs;
return (strcmp(*l, *r));
}
/*
* Validates and converts a single Solaris share option to its Linux
* equivalent.
*/
static int
get_linux_shareopts_cb(const char *key, const char *value, void *cookie)
{
/* This list must remain sorted, since we bsearch() it */
static const char *const valid_keys[] = { "all_squash", "anongid",
"anonuid", "async", "auth_nlm", "crossmnt", "fsid", "fsuid", "hide",
"insecure", "insecure_locks", "mountpoint", "mp", "no_acl",
"no_all_squash", "no_auth_nlm", "no_root_squash",
"no_subtree_check", "no_wdelay", "nohide", "refer", "replicas",
"root_squash", "secure", "secure_locks", "subtree_check", "sync",
"wdelay" };
char **plinux_opts = (char **)cookie;
/* host-specific options; these are handled elsewhere */
if (strcmp(key, "ro") == 0 || strcmp(key, "rw") == 0 ||
strcmp(key, "sec") == 0)
return (SA_OK);
if (strcmp(key, "anon") == 0)
key = "anonuid";
if (strcmp(key, "root_mapping") == 0) {
(void) add_linux_shareopt(plinux_opts, "root_squash", NULL);
key = "anonuid";
}
if (strcmp(key, "nosub") == 0)
key = "subtree_check";
if (bsearch(&key, valid_keys, ARRAY_SIZE(valid_keys),
sizeof (*valid_keys), string_cmp) == NULL)
return (SA_SYNTAX_ERR);
(void) add_linux_shareopt(plinux_opts, key, value);
return (SA_OK);
}
/*
* Takes a string containing Solaris share options (e.g. "sync,no_acl") and
* converts them to a comma-separated string of Linux NFS options.
*/
static int
get_linux_shareopts(const char *shareopts, char **plinux_opts)
{
int error;
assert(plinux_opts != NULL);
*plinux_opts = NULL;
/* no_subtree_check - Default as of nfs-utils v1.1.0 */
(void) add_linux_shareopt(plinux_opts, "no_subtree_check", NULL);
/* mountpoint - Restrict exports to ZFS mountpoints */
(void) add_linux_shareopt(plinux_opts, "mountpoint", NULL);
error = foreach_nfs_shareopt(shareopts, get_linux_shareopts_cb,
plinux_opts);
if (error != SA_OK) {
free(*plinux_opts);
*plinux_opts = NULL;
}
return (error);
}
/*
* This function populates an entry into /etc/exports.d/zfs.exports.
* This file is consumed by the Linux NFS server so that ZFS shares are
* automatically exported upon boot or whenever the NFS server restarts.
*/
static int
nfs_add_entry(FILE *tmpfile, const char *sharepath,
const char *host, const char *security, const char *access_opts,
void *pcookie)
{
const char *linux_opts = (const char *)pcookie;
if (linux_opts == NULL)
linux_opts = "";
boolean_t need_free;
char *mp;
int rc = nfs_escape_mountpoint(sharepath, &mp, &need_free);
if (rc != SA_OK)
return (rc);
if (fprintf(tmpfile, "%s %s(sec=%s,%s,%s)\n", mp,
get_linux_hostspec(host), security, access_opts,
linux_opts) < 0) {
fprintf(stderr, "failed to write to temporary file\n");
rc = SA_SYSTEM_ERR;
}
if (need_free)
free(mp);
return (rc);
}
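/*
 * Illustrative example (not part of this change): for a share at /tank/fs
 * exported rw to @192.168.0.0/16 with sec=sys, the line written to
 * zfs.exports looks like
 *
 *	/tank/fs 192.168.0.0/16(sec=sys,rw,no_subtree_check,mountpoint)
 */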
/*
* Enables NFS sharing for the specified share.
*/
static int
nfs_enable_share_impl(sa_share_impl_t impl_share, FILE *tmpfile)
{
char *linux_opts = NULL;
int error = get_linux_shareopts(impl_share->sa_shareopts, &linux_opts);
if (error != SA_OK)
return (error);
error = foreach_nfs_host(impl_share, tmpfile, nfs_add_entry,
linux_opts);
free(linux_opts);
return (error);
}
static int
nfs_enable_share(sa_share_impl_t impl_share)
{
if (!nfs_available())
return (SA_SYSTEM_ERR);
return (nfs_toggle_share(
ZFS_EXPORTS_LOCK, ZFS_EXPORTS_FILE, ZFS_EXPORTS_DIR, impl_share,
nfs_enable_share_impl));
}
/*
* Disables NFS sharing for the specified share.
*/
static int
nfs_disable_share_impl(sa_share_impl_t impl_share, FILE *tmpfile)
{
(void) impl_share, (void) tmpfile;
return (SA_OK);
}
static int
nfs_disable_share(sa_share_impl_t impl_share)
{
if (!nfs_available())
- return (SA_SYSTEM_ERR);
+ return (SA_OK);
return (nfs_toggle_share(
ZFS_EXPORTS_LOCK, ZFS_EXPORTS_FILE, ZFS_EXPORTS_DIR, impl_share,
nfs_disable_share_impl));
}
static boolean_t
nfs_is_shared(sa_share_impl_t impl_share)
{
if (!nfs_available())
return (SA_SYSTEM_ERR);
return (nfs_is_shared_impl(ZFS_EXPORTS_FILE, impl_share));
}
/*
* Checks whether the specified NFS share options are syntactically correct.
*/
static int
nfs_validate_shareopts(const char *shareopts)
{
char *linux_opts = NULL;
int error = get_linux_shareopts(shareopts, &linux_opts);
if (error != SA_OK)
return (error);
free(linux_opts);
return (SA_OK);
}
static int
nfs_commit_shares(void)
{
if (!nfs_available())
return (SA_SYSTEM_ERR);
char *argv[] = {
(char *)"/usr/sbin/exportfs",
(char *)"-ra",
NULL
};
return (libzfs_run_process(argv[0], argv, 0));
}
+static void
+nfs_truncate_shares(void)
+{
+ nfs_reset_shares(ZFS_EXPORTS_LOCK, ZFS_EXPORTS_FILE);
+}
+
const sa_fstype_t libshare_nfs_type = {
.enable_share = nfs_enable_share,
.disable_share = nfs_disable_share,
.is_shared = nfs_is_shared,
.validate_shareopts = nfs_validate_shareopts,
.commit_shares = nfs_commit_shares,
+ .truncate_shares = nfs_truncate_shares,
};
static boolean_t
nfs_available(void)
{
static int avail;
if (!avail) {
if (access("/usr/sbin/exportfs", F_OK) != 0)
avail = -1;
else
avail = 1;
}
return (avail == 1);
}
diff --git a/sys/contrib/openzfs/lib/libspl/atomic.c b/sys/contrib/openzfs/lib/libspl/atomic.c
index ba14b113f585..8cc350710ba0 100644
--- a/sys/contrib/openzfs/lib/libspl/atomic.c
+++ b/sys/contrib/openzfs/lib/libspl/atomic.c
@@ -1,394 +1,400 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2009 by Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <atomic.h>
/*
* These are the void returning variants
*/
#define ATOMIC_INC(name, type) \
void atomic_inc_##name(volatile type *target) \
{ \
(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
}
/* BEGIN CSTYLED */
ATOMIC_INC(8, uint8_t)
ATOMIC_INC(16, uint16_t)
ATOMIC_INC(32, uint32_t)
ATOMIC_INC(64, uint64_t)
ATOMIC_INC(uchar, uchar_t)
ATOMIC_INC(ushort, ushort_t)
ATOMIC_INC(uint, uint_t)
ATOMIC_INC(ulong, ulong_t)
/* END CSTYLED */
#define ATOMIC_DEC(name, type) \
void atomic_dec_##name(volatile type *target) \
{ \
(void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
}
/* BEGIN CSTYLED */
ATOMIC_DEC(8, uint8_t)
ATOMIC_DEC(16, uint16_t)
ATOMIC_DEC(32, uint32_t)
ATOMIC_DEC(64, uint64_t)
ATOMIC_DEC(uchar, uchar_t)
ATOMIC_DEC(ushort, ushort_t)
ATOMIC_DEC(uint, uint_t)
ATOMIC_DEC(ulong, ulong_t)
/* END CSTYLED */
#define ATOMIC_ADD(name, type1, type2) \
void atomic_add_##name(volatile type1 *target, type2 bits) \
{ \
(void) __atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
void
atomic_add_ptr(volatile void *target, ssize_t bits)
{
(void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}
/* BEGIN CSTYLED */
ATOMIC_ADD(8, uint8_t, int8_t)
ATOMIC_ADD(16, uint16_t, int16_t)
ATOMIC_ADD(32, uint32_t, int32_t)
ATOMIC_ADD(64, uint64_t, int64_t)
ATOMIC_ADD(char, uchar_t, signed char)
ATOMIC_ADD(short, ushort_t, short)
ATOMIC_ADD(int, uint_t, int)
ATOMIC_ADD(long, ulong_t, long)
/* END CSTYLED */
#define ATOMIC_SUB(name, type1, type2) \
void atomic_sub_##name(volatile type1 *target, type2 bits) \
{ \
(void) __atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
void
atomic_sub_ptr(volatile void *target, ssize_t bits)
{
(void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}
/* BEGIN CSTYLED */
ATOMIC_SUB(8, uint8_t, int8_t)
ATOMIC_SUB(16, uint16_t, int16_t)
ATOMIC_SUB(32, uint32_t, int32_t)
ATOMIC_SUB(64, uint64_t, int64_t)
ATOMIC_SUB(char, uchar_t, signed char)
ATOMIC_SUB(short, ushort_t, short)
ATOMIC_SUB(int, uint_t, int)
ATOMIC_SUB(long, ulong_t, long)
/* END CSTYLED */
#define ATOMIC_OR(name, type) \
void atomic_or_##name(volatile type *target, type bits) \
{ \
(void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
/* BEGIN CSTYLED */
ATOMIC_OR(8, uint8_t)
ATOMIC_OR(16, uint16_t)
ATOMIC_OR(32, uint32_t)
ATOMIC_OR(64, uint64_t)
ATOMIC_OR(uchar, uchar_t)
ATOMIC_OR(ushort, ushort_t)
ATOMIC_OR(uint, uint_t)
ATOMIC_OR(ulong, ulong_t)
/* END CSTYLED */
#define ATOMIC_AND(name, type) \
void atomic_and_##name(volatile type *target, type bits) \
{ \
(void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
/* BEGIN CSTYLED */
ATOMIC_AND(8, uint8_t)
ATOMIC_AND(16, uint16_t)
ATOMIC_AND(32, uint32_t)
ATOMIC_AND(64, uint64_t)
ATOMIC_AND(uchar, uchar_t)
ATOMIC_AND(ushort, ushort_t)
ATOMIC_AND(uint, uint_t)
ATOMIC_AND(ulong, ulong_t)
/* END CSTYLED */
/*
* New value returning variants
*/
#define ATOMIC_INC_NV(name, type) \
type atomic_inc_##name##_nv(volatile type *target) \
{ \
return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
}
/* BEGIN CSTYLED */
ATOMIC_INC_NV(8, uint8_t)
ATOMIC_INC_NV(16, uint16_t)
ATOMIC_INC_NV(32, uint32_t)
ATOMIC_INC_NV(64, uint64_t)
ATOMIC_INC_NV(uchar, uchar_t)
ATOMIC_INC_NV(ushort, ushort_t)
ATOMIC_INC_NV(uint, uint_t)
ATOMIC_INC_NV(ulong, ulong_t)
/* END CSTYLED */
#define ATOMIC_DEC_NV(name, type) \
type atomic_dec_##name##_nv(volatile type *target) \
{ \
return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
}
/* BEGIN CSTYLED */
ATOMIC_DEC_NV(8, uint8_t)
ATOMIC_DEC_NV(16, uint16_t)
ATOMIC_DEC_NV(32, uint32_t)
ATOMIC_DEC_NV(64, uint64_t)
ATOMIC_DEC_NV(uchar, uchar_t)
ATOMIC_DEC_NV(ushort, ushort_t)
ATOMIC_DEC_NV(uint, uint_t)
ATOMIC_DEC_NV(ulong, ulong_t)
/* END CSTYLED */
#define ATOMIC_ADD_NV(name, type1, type2) \
type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \
{ \
return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
void *
atomic_add_ptr_nv(volatile void *target, ssize_t bits)
{
return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}
/* BEGIN CSTYLED */
ATOMIC_ADD_NV(8, uint8_t, int8_t)
ATOMIC_ADD_NV(16, uint16_t, int16_t)
ATOMIC_ADD_NV(32, uint32_t, int32_t)
ATOMIC_ADD_NV(64, uint64_t, int64_t)
ATOMIC_ADD_NV(char, uchar_t, signed char)
ATOMIC_ADD_NV(short, ushort_t, short)
ATOMIC_ADD_NV(int, uint_t, int)
ATOMIC_ADD_NV(long, ulong_t, long)
/* END CSTYLED */
#define ATOMIC_SUB_NV(name, type1, type2) \
type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \
{ \
return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
void *
atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
{
return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}
/* BEGIN CSTYLED */
ATOMIC_SUB_NV(8, uint8_t, int8_t)
ATOMIC_SUB_NV(char, uchar_t, signed char)
ATOMIC_SUB_NV(16, uint16_t, int16_t)
ATOMIC_SUB_NV(short, ushort_t, short)
ATOMIC_SUB_NV(32, uint32_t, int32_t)
ATOMIC_SUB_NV(int, uint_t, int)
ATOMIC_SUB_NV(long, ulong_t, long)
ATOMIC_SUB_NV(64, uint64_t, int64_t)
/* END CSTYLED */
#define ATOMIC_OR_NV(name, type) \
type atomic_or_##name##_nv(volatile type *target, type bits) \
{ \
return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
/* BEGIN CSTYLED */
ATOMIC_OR_NV(8, uint8_t)
ATOMIC_OR_NV(16, uint16_t)
ATOMIC_OR_NV(32, uint32_t)
ATOMIC_OR_NV(64, uint64_t)
ATOMIC_OR_NV(uchar, uchar_t)
ATOMIC_OR_NV(ushort, ushort_t)
ATOMIC_OR_NV(uint, uint_t)
ATOMIC_OR_NV(ulong, ulong_t)
/* END CSTYLED */
#define ATOMIC_AND_NV(name, type) \
type atomic_and_##name##_nv(volatile type *target, type bits) \
{ \
return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
/* BEGIN CSTYLED */
ATOMIC_AND_NV(8, uint8_t)
ATOMIC_AND_NV(16, uint16_t)
ATOMIC_AND_NV(32, uint32_t)
ATOMIC_AND_NV(64, uint64_t)
ATOMIC_AND_NV(uchar, uchar_t)
ATOMIC_AND_NV(ushort, ushort_t)
ATOMIC_AND_NV(uint, uint_t)
ATOMIC_AND_NV(ulong, ulong_t)
/* END CSTYLED */
/*
* If *tgt == exp, set *tgt = des; return old value
*
* This may not look right on the first pass (or the sixteenth), but,
* from https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html:
* > If they are not equal, the operation is a read
* > and the current contents of *ptr are written into *expected.
* And, in the converse case, exp is already *target by definition.
*/
#define ATOMIC_CAS(name, type) \
type atomic_cas_##name(volatile type *target, type exp, type des) \
{ \
__atomic_compare_exchange_n(target, &exp, des, B_FALSE, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
return (exp); \
}
void *
atomic_cas_ptr(volatile void *target, void *exp, void *des)
{
__atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return (exp);
}
/* BEGIN CSTYLED */
ATOMIC_CAS(8, uint8_t)
ATOMIC_CAS(16, uint16_t)
ATOMIC_CAS(32, uint32_t)
ATOMIC_CAS(64, uint64_t)
ATOMIC_CAS(uchar, uchar_t)
ATOMIC_CAS(ushort, ushort_t)
ATOMIC_CAS(uint, uint_t)
ATOMIC_CAS(ulong, ulong_t)
/* END CSTYLED */
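/*
 * Illustrative sketch (not part of this change): the usual retry loop built
 * on the CAS primitives above, here bumping a counter only while it stays
 * below a caller-supplied limit. The helper name is hypothetical.
 */
static uint32_t
cas_inc_below(volatile uint32_t *ctr, uint32_t limit)
{
	uint32_t old, seen;

	do {
		old = *ctr;
		if (old >= limit)
			return (old);
		/* Succeeds only if *ctr still equals old. */
		seen = atomic_cas_32(ctr, old, old + 1);
	} while (seen != old);
	return (old + 1);
}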
/*
* Swap target and return old value
*/
#define ATOMIC_SWAP(name, type) \
type atomic_swap_##name(volatile type *target, type bits) \
{ \
return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
}
/* BEGIN CSTYLED */
ATOMIC_SWAP(8, uint8_t)
ATOMIC_SWAP(16, uint16_t)
ATOMIC_SWAP(32, uint32_t)
ATOMIC_SWAP(64, uint64_t)
ATOMIC_SWAP(uchar, uchar_t)
ATOMIC_SWAP(ushort, ushort_t)
ATOMIC_SWAP(uint, uint_t)
ATOMIC_SWAP(ulong, ulong_t)
/* END CSTYLED */
void *
atomic_swap_ptr(volatile void *target, void *bits)
{
return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
}
#ifndef _LP64
uint64_t
atomic_load_64(volatile uint64_t *target)
{
return (__atomic_load_n(target, __ATOMIC_RELAXED));
}
void
atomic_store_64(volatile uint64_t *target, uint64_t bits)
{
return (__atomic_store_n(target, bits, __ATOMIC_RELAXED));
}
#endif
int
atomic_set_long_excl(volatile ulong_t *target, uint_t value)
{
ulong_t bit = 1UL << value;
ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
return ((old & bit) ? -1 : 0);
}
int
atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
{
ulong_t bit = 1UL << value;
ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST);
return ((old & bit) ? 0 : -1);
}
void
membar_enter(void)
{
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
void
membar_exit(void)
{
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
+void
+membar_sync(void)
+{
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
void
membar_producer(void)
{
__atomic_thread_fence(__ATOMIC_RELEASE);
}
void
membar_consumer(void)
{
__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
diff --git a/sys/contrib/openzfs/lib/libspl/include/atomic.h b/sys/contrib/openzfs/lib/libspl/include/atomic.h
index 1249d42b604a..4ebdbbda9864 100644
--- a/sys/contrib/openzfs/lib/libspl/include/atomic.h
+++ b/sys/contrib/openzfs/lib/libspl/include/atomic.h
@@ -1,339 +1,346 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_ATOMIC_H
#define _SYS_ATOMIC_H
#include <sys/types.h>
#include <sys/inttypes.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(__STDC__)
/*
* Increment target.
*/
extern void atomic_inc_8(volatile uint8_t *);
extern void atomic_inc_uchar(volatile uchar_t *);
extern void atomic_inc_16(volatile uint16_t *);
extern void atomic_inc_ushort(volatile ushort_t *);
extern void atomic_inc_32(volatile uint32_t *);
extern void atomic_inc_uint(volatile uint_t *);
extern void atomic_inc_ulong(volatile ulong_t *);
#if defined(_INT64_TYPE)
extern void atomic_inc_64(volatile uint64_t *);
#endif
/*
* Decrement target
*/
extern void atomic_dec_8(volatile uint8_t *);
extern void atomic_dec_uchar(volatile uchar_t *);
extern void atomic_dec_16(volatile uint16_t *);
extern void atomic_dec_ushort(volatile ushort_t *);
extern void atomic_dec_32(volatile uint32_t *);
extern void atomic_dec_uint(volatile uint_t *);
extern void atomic_dec_ulong(volatile ulong_t *);
#if defined(_INT64_TYPE)
extern void atomic_dec_64(volatile uint64_t *);
#endif
/*
* Add delta to target
*/
extern void atomic_add_8(volatile uint8_t *, int8_t);
extern void atomic_add_char(volatile uchar_t *, signed char);
extern void atomic_add_16(volatile uint16_t *, int16_t);
extern void atomic_add_short(volatile ushort_t *, short);
extern void atomic_add_32(volatile uint32_t *, int32_t);
extern void atomic_add_int(volatile uint_t *, int);
extern void atomic_add_ptr(volatile void *, ssize_t);
extern void atomic_add_long(volatile ulong_t *, long);
#if defined(_INT64_TYPE)
extern void atomic_add_64(volatile uint64_t *, int64_t);
#endif
/*
* Subtract delta from target
*/
extern void atomic_sub_8(volatile uint8_t *, int8_t);
extern void atomic_sub_char(volatile uchar_t *, signed char);
extern void atomic_sub_16(volatile uint16_t *, int16_t);
extern void atomic_sub_short(volatile ushort_t *, short);
extern void atomic_sub_32(volatile uint32_t *, int32_t);
extern void atomic_sub_int(volatile uint_t *, int);
extern void atomic_sub_ptr(volatile void *, ssize_t);
extern void atomic_sub_long(volatile ulong_t *, long);
#if defined(_INT64_TYPE)
extern void atomic_sub_64(volatile uint64_t *, int64_t);
#endif
/*
* logical OR bits with target
*/
extern void atomic_or_8(volatile uint8_t *, uint8_t);
extern void atomic_or_uchar(volatile uchar_t *, uchar_t);
extern void atomic_or_16(volatile uint16_t *, uint16_t);
extern void atomic_or_ushort(volatile ushort_t *, ushort_t);
extern void atomic_or_32(volatile uint32_t *, uint32_t);
extern void atomic_or_uint(volatile uint_t *, uint_t);
extern void atomic_or_ulong(volatile ulong_t *, ulong_t);
#if defined(_INT64_TYPE)
extern void atomic_or_64(volatile uint64_t *, uint64_t);
#endif
/*
* logical AND bits with target
*/
extern void atomic_and_8(volatile uint8_t *, uint8_t);
extern void atomic_and_uchar(volatile uchar_t *, uchar_t);
extern void atomic_and_16(volatile uint16_t *, uint16_t);
extern void atomic_and_ushort(volatile ushort_t *, ushort_t);
extern void atomic_and_32(volatile uint32_t *, uint32_t);
extern void atomic_and_uint(volatile uint_t *, uint_t);
extern void atomic_and_ulong(volatile ulong_t *, ulong_t);
#if defined(_INT64_TYPE)
extern void atomic_and_64(volatile uint64_t *, uint64_t);
#endif
/*
* As above, but return the new value. Note that these _nv() variants are
* substantially more expensive on some platforms than the no-return-value
* versions above, so don't use them unless you really need to know the
* new value *atomically* (e.g. when decrementing a reference count and
* checking whether it went to zero).
*/
/*
* Increment target and return new value.
*/
extern uint8_t atomic_inc_8_nv(volatile uint8_t *);
extern uchar_t atomic_inc_uchar_nv(volatile uchar_t *);
extern uint16_t atomic_inc_16_nv(volatile uint16_t *);
extern ushort_t atomic_inc_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_inc_32_nv(volatile uint32_t *);
extern uint_t atomic_inc_uint_nv(volatile uint_t *);
extern ulong_t atomic_inc_ulong_nv(volatile ulong_t *);
#if defined(_INT64_TYPE)
extern uint64_t atomic_inc_64_nv(volatile uint64_t *);
#endif
/*
* Decrement target and return new value.
*/
extern uint8_t atomic_dec_8_nv(volatile uint8_t *);
extern uchar_t atomic_dec_uchar_nv(volatile uchar_t *);
extern uint16_t atomic_dec_16_nv(volatile uint16_t *);
extern ushort_t atomic_dec_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_dec_32_nv(volatile uint32_t *);
extern uint_t atomic_dec_uint_nv(volatile uint_t *);
extern ulong_t atomic_dec_ulong_nv(volatile ulong_t *);
#if defined(_INT64_TYPE)
extern uint64_t atomic_dec_64_nv(volatile uint64_t *);
#endif
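/*
 * Illustrative example (not part of this change): a reference count needs
 * the _nv() form, because the decrement and the zero test must observe the
 * same atomic result (obj_free() here is a hypothetical destructor):
 *
 *	if (atomic_dec_32_nv(&obj->refcount) == 0)
 *		obj_free(obj);
 */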
/*
* Add delta to target
*/
extern uint8_t atomic_add_8_nv(volatile uint8_t *, int8_t);
extern uchar_t atomic_add_char_nv(volatile uchar_t *, signed char);
extern uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
extern ushort_t atomic_add_short_nv(volatile ushort_t *, short);
extern uint32_t atomic_add_32_nv(volatile uint32_t *, int32_t);
extern uint_t atomic_add_int_nv(volatile uint_t *, int);
extern void *atomic_add_ptr_nv(volatile void *, ssize_t);
extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
#if defined(_INT64_TYPE)
extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
#endif
/*
* Subtract delta from target
*/
extern uint8_t atomic_sub_8_nv(volatile uint8_t *, int8_t);
extern uchar_t atomic_sub_char_nv(volatile uchar_t *, signed char);
extern uint16_t atomic_sub_16_nv(volatile uint16_t *, int16_t);
extern ushort_t atomic_sub_short_nv(volatile ushort_t *, short);
extern uint32_t atomic_sub_32_nv(volatile uint32_t *, int32_t);
extern uint_t atomic_sub_int_nv(volatile uint_t *, int);
extern void *atomic_sub_ptr_nv(volatile void *, ssize_t);
extern ulong_t atomic_sub_long_nv(volatile ulong_t *, long);
#if defined(_INT64_TYPE)
extern uint64_t atomic_sub_64_nv(volatile uint64_t *, int64_t);
#endif
/*
* logical OR bits with target and return new value.
*/
extern uint8_t atomic_or_8_nv(volatile uint8_t *, uint8_t);
extern uchar_t atomic_or_uchar_nv(volatile uchar_t *, uchar_t);
extern uint16_t atomic_or_16_nv(volatile uint16_t *, uint16_t);
extern ushort_t atomic_or_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_or_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_or_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_or_ulong_nv(volatile ulong_t *, ulong_t);
#if defined(_INT64_TYPE)
extern uint64_t atomic_or_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
* logical AND bits with target and return new value.
*/
extern uint8_t atomic_and_8_nv(volatile uint8_t *, uint8_t);
extern uchar_t atomic_and_uchar_nv(volatile uchar_t *, uchar_t);
extern uint16_t atomic_and_16_nv(volatile uint16_t *, uint16_t);
extern ushort_t atomic_and_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_and_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_and_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_and_ulong_nv(volatile ulong_t *, ulong_t);
#if defined(_INT64_TYPE)
extern uint64_t atomic_and_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
* If *arg1 == arg2, set *arg1 = arg3; return old value
*/
extern uint8_t atomic_cas_8(volatile uint8_t *, uint8_t, uint8_t);
extern uchar_t atomic_cas_uchar(volatile uchar_t *, uchar_t, uchar_t);
extern uint16_t atomic_cas_16(volatile uint16_t *, uint16_t, uint16_t);
extern ushort_t atomic_cas_ushort(volatile ushort_t *, ushort_t, ushort_t);
extern uint32_t atomic_cas_32(volatile uint32_t *, uint32_t, uint32_t);
extern uint_t atomic_cas_uint(volatile uint_t *, uint_t, uint_t);
extern void *atomic_cas_ptr(volatile void *, void *, void *);
extern ulong_t atomic_cas_ulong(volatile ulong_t *, ulong_t, ulong_t);
#if defined(_INT64_TYPE)
extern uint64_t atomic_cas_64(volatile uint64_t *, uint64_t, uint64_t);
#endif
/*
* Swap target and return old value
*/
extern uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);
extern uchar_t atomic_swap_uchar(volatile uchar_t *, uchar_t);
extern uint16_t atomic_swap_16(volatile uint16_t *, uint16_t);
extern ushort_t atomic_swap_ushort(volatile ushort_t *, ushort_t);
extern uint32_t atomic_swap_32(volatile uint32_t *, uint32_t);
extern uint_t atomic_swap_uint(volatile uint_t *, uint_t);
extern void *atomic_swap_ptr(volatile void *, void *);
extern ulong_t atomic_swap_ulong(volatile ulong_t *, ulong_t);
#if defined(_INT64_TYPE)
extern uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
#endif
/*
* Atomically read variable.
*/
#define atomic_load_char(p) (*(volatile uchar_t *)(p))
#define atomic_load_short(p) (*(volatile ushort_t *)(p))
#define atomic_load_int(p) (*(volatile uint_t *)(p))
#define atomic_load_long(p) (*(volatile ulong_t *)(p))
#define atomic_load_ptr(p) (*(volatile __typeof(*p) *)(p))
#define atomic_load_8(p) (*(volatile uint8_t *)(p))
#define atomic_load_16(p) (*(volatile uint16_t *)(p))
#define atomic_load_32(p) (*(volatile uint32_t *)(p))
#ifdef _LP64
#define atomic_load_64(p) (*(volatile uint64_t *)(p))
#elif defined(_INT64_TYPE)
extern uint64_t atomic_load_64(volatile uint64_t *);
#endif
/*
* Atomically write variable.
*/
#define atomic_store_char(p, v) \
(*(volatile uchar_t *)(p) = (uchar_t)(v))
#define atomic_store_short(p, v) \
(*(volatile ushort_t *)(p) = (ushort_t)(v))
#define atomic_store_int(p, v) \
(*(volatile uint_t *)(p) = (uint_t)(v))
#define atomic_store_long(p, v) \
(*(volatile ulong_t *)(p) = (ulong_t)(v))
#define atomic_store_ptr(p, v) \
(*(volatile __typeof(*p) *)(p) = (v))
#define atomic_store_8(p, v) \
(*(volatile uint8_t *)(p) = (uint8_t)(v))
#define atomic_store_16(p, v) \
(*(volatile uint16_t *)(p) = (uint16_t)(v))
#define atomic_store_32(p, v) \
(*(volatile uint32_t *)(p) = (uint32_t)(v))
#ifdef _LP64
#define atomic_store_64(p, v) \
(*(volatile uint64_t *)(p) = (uint64_t)(v))
#elif defined(_INT64_TYPE)
extern void atomic_store_64(volatile uint64_t *, uint64_t);
#endif
/*
* Perform an exclusive atomic bit set/clear on a target.
* Returns 0 if bit was successfully set/cleared, or -1
* if the bit was already set/cleared.
*/
extern int atomic_set_long_excl(volatile ulong_t *, uint_t);
extern int atomic_clear_long_excl(volatile ulong_t *, uint_t);
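/*
 * Illustrative example (not part of this change): claiming bit 3 exactly
 * once across threads; only the caller that sees 0 owns the bit.
 *
 *	if (atomic_set_long_excl(&flags, 3) == 0) {
 *		... this thread won the bit ...
 *	}
 */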
/*
* Generic memory barrier used during lock entry, placed after the
* memory operation that acquires the lock to guarantee that the lock
* protects its data. No stores from after the memory barrier will
* reach visibility, and no loads from after the barrier will be
* resolved, before the lock acquisition reaches global visibility.
*/
extern void membar_enter(void);
/*
* Generic memory barrier used during lock exit, placed before the
* memory operation that releases the lock to guarantee that the lock
* protects its data. All loads and stores issued before the barrier
* will be resolved before the subsequent lock update reaches visibility.
*/
extern void membar_exit(void);
+/*
+ * Make all stores and loads emitted prior to the barrier complete before
+ * crossing it, while also making sure stores and loads emitted after the
+ * barrier only start being executed after crossing it.
+ */
+extern void membar_sync(void);
+
/*
* Arrange that all stores issued before this point in the code reach
* global visibility before any stores that follow; useful in producer
* modules that update a data item, then set a flag that it is available.
* The memory barrier guarantees that the available flag is not visible
* earlier than the updated data, i.e. it imposes store ordering.
*/
extern void membar_producer(void);
/*
* Arrange that all loads issued before this point in the code are
* completed before any subsequent loads; useful in consumer modules
* that check to see if data is available and read the data.
* The memory barrier guarantees that the data is not sampled until
* after the available flag has been seen, i.e. it imposes load ordering.
*/
extern void membar_consumer(void);
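/*
 * Illustrative pairing (not part of this change): a producer publishes data
 * and then a ready flag; a consumer polls the flag and then reads the data.
 *
 *	producer:	data = value; membar_producer(); flag = 1;
 *	consumer:	while (!flag) ; membar_consumer(); use(data);
 */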
#endif /* __STDC__ */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ATOMIC_H */
diff --git a/sys/contrib/openzfs/lib/libspl/include/libshare.h b/sys/contrib/openzfs/lib/libspl/include/libshare.h
index d976f096ac39..deeb15c97704 100644
--- a/sys/contrib/openzfs/lib/libspl/include/libshare.h
+++ b/sys/contrib/openzfs/lib/libspl/include/libshare.h
@@ -1,95 +1,96 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
- * Copyright (c) 2019, 2020 by Delphix. All rights reserved.
+ * Copyright (c) 2019, 2022 by Delphix. All rights reserved.
*/
#ifndef _LIBSPL_LIBSHARE_H
#define _LIBSPL_LIBSHARE_H extern __attribute__((visibility("default")))
#include <sys/types.h>
/*
* defined error values
*/
#define SA_OK 0
#define SA_SYSTEM_ERR 7 /* system error, use errno */
#define SA_SYNTAX_ERR 8 /* syntax error on command line */
#define SA_NO_MEMORY 2 /* no memory for data structures */
#define SA_INVALID_PROTOCOL 13 /* specified protocol not valid */
#define SA_NOT_SUPPORTED 21 /* operation not supported for proto */
/* The following errors are never returned by libshare */
#define SA_NO_SUCH_PATH 1 /* provided path doesn't exist */
#define SA_DUPLICATE_NAME 3 /* object name is already in use */
#define SA_BAD_PATH 4 /* not a full path */
#define SA_NO_SUCH_GROUP 5 /* group is not defined */
#define SA_CONFIG_ERR 6 /* system configuration error */
#define SA_NO_PERMISSION 9 /* no permission for operation */
#define SA_BUSY 10 /* resource is busy */
#define SA_NO_SUCH_PROP 11 /* property doesn't exist */
#define SA_INVALID_NAME 12 /* name of object is invalid */
#define SA_NOT_ALLOWED 14 /* operation not allowed */
#define SA_BAD_VALUE 15 /* bad value for property */
#define SA_INVALID_SECURITY 16 /* invalid security type */
#define SA_NO_SUCH_SECURITY 17 /* security set not found */
#define SA_VALUE_CONFLICT 18 /* property value conflict */
#define SA_NOT_IMPLEMENTED 19 /* plugin interface not implemented */
#define SA_INVALID_PATH 20 /* path is sub-dir of existing share */
#define SA_PROP_SHARE_ONLY 22 /* property valid on share only */
#define SA_NOT_SHARED 23 /* path is not shared */
#define SA_NO_SUCH_RESOURCE 24 /* resource not found */
#define SA_RESOURCE_REQUIRED 25 /* resource name is required */
#define SA_MULTIPLE_ERROR 26 /* multiple protocols reported error */
#define SA_PATH_IS_SUBDIR 27 /* check_path found path is subdir */
#define SA_PATH_IS_PARENTDIR 28 /* check_path found path is parent */
#define SA_NO_SECTION 29 /* protocol requires section info */
#define SA_NO_SUCH_SECTION 30 /* no section found */
#define SA_NO_PROPERTIES 31 /* no properties found */
#define SA_PASSWORD_ENC 32 /* passwords must be encrypted */
#define SA_SHARE_EXISTS 33 /* path or file is already shared */
/* initialization */
_LIBSPL_LIBSHARE_H const char *sa_errorstr(int);
/* available protocols */
enum sa_protocol {
SA_PROTOCOL_NFS,
SA_PROTOCOL_SMB, /* ABI: add before _COUNT */
SA_PROTOCOL_COUNT,
};
/* lower-case */
_LIBSPL_LIBSHARE_H const char *const sa_protocol_names[SA_PROTOCOL_COUNT];
/* share control */
_LIBSPL_LIBSHARE_H int sa_enable_share(const char *, const char *, const char *,
enum sa_protocol);
_LIBSPL_LIBSHARE_H int sa_disable_share(const char *, enum sa_protocol);
_LIBSPL_LIBSHARE_H boolean_t sa_is_shared(const char *, enum sa_protocol);
_LIBSPL_LIBSHARE_H void sa_commit_shares(enum sa_protocol);
+_LIBSPL_LIBSHARE_H void sa_truncate_shares(enum sa_protocol);
/* protocol specific interfaces */
_LIBSPL_LIBSHARE_H int sa_validate_shareopts(const char *, enum sa_protocol);
#endif /* _LIBSPL_LIBSHARE_H */
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/simd.h b/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
index c9d86a0808f1..c0099dd7919b 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
@@ -1,521 +1,518 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#ifndef _LIBSPL_SYS_SIMD_H
#define _LIBSPL_SYS_SIMD_H
#include <sys/isa_defs.h>
#include <sys/types.h>
#if defined(__x86)
#include <cpuid.h>
#define kfpu_allowed() 1
#define kfpu_begin() do {} while (0)
#define kfpu_end() do {} while (0)
#define kfpu_init() 0
#define kfpu_fini() ((void) 0)
/*
* CPUID feature tests for user-space.
*
* x86 registers used implicitly by CPUID
*/
typedef enum cpuid_regs {
EAX = 0,
EBX,
ECX,
EDX,
CPUID_REG_CNT = 4
} cpuid_regs_t;
/*
* List of instruction sets identified by CPUID
*/
typedef enum cpuid_inst_sets {
SSE = 0,
SSE2,
SSE3,
SSSE3,
SSE4_1,
SSE4_2,
OSXSAVE,
AVX,
AVX2,
BMI1,
BMI2,
AVX512F,
AVX512CD,
AVX512DQ,
AVX512BW,
AVX512IFMA,
AVX512VBMI,
AVX512PF,
AVX512ER,
AVX512VL,
AES,
PCLMULQDQ,
MOVBE
} cpuid_inst_sets_t;
/*
* Instruction set descriptor.
*/
typedef struct cpuid_feature_desc {
uint32_t leaf; /* CPUID leaf */
uint32_t subleaf; /* CPUID sub-leaf */
uint32_t flag; /* bit mask of the feature */
cpuid_regs_t reg; /* which CPUID return register to test */
} cpuid_feature_desc_t;
#define _AVX512F_BIT (1U << 16)
#define _AVX512CD_BIT (_AVX512F_BIT | (1U << 28))
#define _AVX512DQ_BIT (_AVX512F_BIT | (1U << 17))
#define _AVX512BW_BIT (_AVX512F_BIT | (1U << 30))
#define _AVX512IFMA_BIT (_AVX512F_BIT | (1U << 21))
#define _AVX512VBMI_BIT (1U << 1) /* AVX512F_BIT is on another leaf */
#define _AVX512PF_BIT (_AVX512F_BIT | (1U << 26))
#define _AVX512ER_BIT (_AVX512F_BIT | (1U << 27))
#define _AVX512VL_BIT (1U << 31) /* if used also check other levels */
#define _AES_BIT (1U << 25)
#define _PCLMULQDQ_BIT (1U << 1)
#define _MOVBE_BIT (1U << 22)
/*
* Descriptions of supported instruction sets
*/
static const cpuid_feature_desc_t cpuid_features[] = {
[SSE] = {1U, 0U, 1U << 25, EDX },
[SSE2] = {1U, 0U, 1U << 26, EDX },
[SSE3] = {1U, 0U, 1U << 0, ECX },
[SSSE3] = {1U, 0U, 1U << 9, ECX },
[SSE4_1] = {1U, 0U, 1U << 19, ECX },
[SSE4_2] = {1U, 0U, 1U << 20, ECX },
[OSXSAVE] = {1U, 0U, 1U << 27, ECX },
[AVX] = {1U, 0U, 1U << 28, ECX },
[AVX2] = {7U, 0U, 1U << 5, EBX },
[BMI1] = {7U, 0U, 1U << 3, EBX },
[BMI2] = {7U, 0U, 1U << 8, EBX },
[AVX512F] = {7U, 0U, _AVX512F_BIT, EBX },
[AVX512CD] = {7U, 0U, _AVX512CD_BIT, EBX },
[AVX512DQ] = {7U, 0U, _AVX512DQ_BIT, EBX },
[AVX512BW] = {7U, 0U, _AVX512BW_BIT, EBX },
[AVX512IFMA] = {7U, 0U, _AVX512IFMA_BIT, EBX },
[AVX512VBMI] = {7U, 0U, _AVX512VBMI_BIT, ECX },
[AVX512PF] = {7U, 0U, _AVX512PF_BIT, EBX },
[AVX512ER] = {7U, 0U, _AVX512ER_BIT, EBX },
[AVX512VL] = {7U, 0U, _AVX512VL_BIT, EBX },
[AES] = {1U, 0U, _AES_BIT, ECX },
[PCLMULQDQ] = {1U, 0U, _PCLMULQDQ_BIT, ECX },
[MOVBE] = {1U, 0U, _MOVBE_BIT, ECX },
};
/*
* Check if OS supports AVX and AVX2 by checking XCR0
* Only call this function if CPUID indicates that the AVX feature is
* supported by the CPU; otherwise XGETBV may fault as an illegal instruction.
*/
static inline uint64_t
xgetbv(uint32_t index)
{
uint32_t eax, edx;
/* xgetbv - instruction byte code */
__asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
: "=a" (eax), "=d" (edx)
: "c" (index));
return ((((uint64_t)edx)<<32) | (uint64_t)eax);
}
/*
* Check if CPU supports a feature
*/
static inline boolean_t
__cpuid_check_feature(const cpuid_feature_desc_t *desc)
{
uint32_t r[CPUID_REG_CNT];
if (__get_cpuid_max(0, NULL) >= desc->leaf) {
/*
* __cpuid_count is needed to properly check
* for AVX2. It is a macro, so return parameters
* are passed by value.
*/
__cpuid_count(desc->leaf, desc->subleaf,
r[EAX], r[EBX], r[ECX], r[EDX]);
return ((r[desc->reg] & desc->flag) == desc->flag);
}
return (B_FALSE);
}
#define CPUID_FEATURE_CHECK(name, id) \
static inline boolean_t \
__cpuid_has_ ## name(void) \
{ \
return (__cpuid_check_feature(&cpuid_features[id])); \
}
/*
* Define functions for user-space CPUID features testing
*/
CPUID_FEATURE_CHECK(sse, SSE);
CPUID_FEATURE_CHECK(sse2, SSE2);
CPUID_FEATURE_CHECK(sse3, SSE3);
CPUID_FEATURE_CHECK(ssse3, SSSE3);
CPUID_FEATURE_CHECK(sse4_1, SSE4_1);
CPUID_FEATURE_CHECK(sse4_2, SSE4_2);
CPUID_FEATURE_CHECK(avx, AVX);
CPUID_FEATURE_CHECK(avx2, AVX2);
CPUID_FEATURE_CHECK(osxsave, OSXSAVE);
CPUID_FEATURE_CHECK(bmi1, BMI1);
CPUID_FEATURE_CHECK(bmi2, BMI2);
CPUID_FEATURE_CHECK(avx512f, AVX512F);
CPUID_FEATURE_CHECK(avx512cd, AVX512CD);
CPUID_FEATURE_CHECK(avx512dq, AVX512DQ);
CPUID_FEATURE_CHECK(avx512bw, AVX512BW);
CPUID_FEATURE_CHECK(avx512ifma, AVX512IFMA);
CPUID_FEATURE_CHECK(avx512vbmi, AVX512VBMI);
CPUID_FEATURE_CHECK(avx512pf, AVX512PF);
CPUID_FEATURE_CHECK(avx512er, AVX512ER);
CPUID_FEATURE_CHECK(avx512vl, AVX512VL);
CPUID_FEATURE_CHECK(aes, AES);
CPUID_FEATURE_CHECK(pclmulqdq, PCLMULQDQ);
CPUID_FEATURE_CHECK(movbe, MOVBE);
/*
* Detect register set support
*/
static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
boolean_t has_osxsave;
uint64_t xcr0;
has_osxsave = __cpuid_has_osxsave();
if (!has_osxsave)
return (B_FALSE);
xcr0 = xgetbv(0);
return ((xcr0 & state) == state);
}
#define _XSTATE_SSE_AVX (0x2 | 0x4)
#define _XSTATE_AVX512 (0xE0 | _XSTATE_SSE_AVX)
#define __ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define __zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)
/*
* Check if SSE instruction set is available
*/
static inline boolean_t
zfs_sse_available(void)
{
return (__cpuid_has_sse());
}
/*
* Check if SSE2 instruction set is available
*/
static inline boolean_t
zfs_sse2_available(void)
{
return (__cpuid_has_sse2());
}
/*
* Check if SSE3 instruction set is available
*/
static inline boolean_t
zfs_sse3_available(void)
{
return (__cpuid_has_sse3());
}
/*
* Check if SSSE3 instruction set is available
*/
static inline boolean_t
zfs_ssse3_available(void)
{
return (__cpuid_has_ssse3());
}
/*
* Check if SSE4.1 instruction set is available
*/
static inline boolean_t
zfs_sse4_1_available(void)
{
return (__cpuid_has_sse4_1());
}
/*
* Check if SSE4.2 instruction set is available
*/
static inline boolean_t
zfs_sse4_2_available(void)
{
return (__cpuid_has_sse4_2());
}
/*
* Check if AVX instruction set is available
*/
static inline boolean_t
zfs_avx_available(void)
{
return (__cpuid_has_avx() && __ymm_enabled());
}
/*
* Check if AVX2 instruction set is available
*/
static inline boolean_t
zfs_avx2_available(void)
{
return (__cpuid_has_avx2() && __ymm_enabled());
}
/*
* Check if BMI1 instruction set is available
*/
static inline boolean_t
zfs_bmi1_available(void)
{
return (__cpuid_has_bmi1());
}
/*
* Check if BMI2 instruction set is available
*/
static inline boolean_t
zfs_bmi2_available(void)
{
return (__cpuid_has_bmi2());
}
/*
* Check if AES instruction set is available
*/
static inline boolean_t
zfs_aes_available(void)
{
return (__cpuid_has_aes());
}
/*
* Check if PCLMULQDQ instruction set is available
*/
static inline boolean_t
zfs_pclmulqdq_available(void)
{
return (__cpuid_has_pclmulqdq());
}
/*
* Check if MOVBE instruction is available
*/
static inline boolean_t
zfs_movbe_available(void)
{
return (__cpuid_has_movbe());
}
/*
* AVX-512 family of instruction sets:
*
* AVX512F Foundation
* AVX512CD Conflict Detection Instructions
* AVX512ER Exponential and Reciprocal Instructions
* AVX512PF Prefetch Instructions
*
* AVX512BW Byte and Word Instructions
* AVX512DQ Double-word and Quadword Instructions
* AVX512VL Vector Length Extensions
*
* AVX512IFMA Integer Fused Multiply Add (Not supported by kernel 4.4)
* AVX512VBMI Vector Byte Manipulation Instructions
*/
/*
* Check if AVX512F instruction set is available
*/
static inline boolean_t
zfs_avx512f_available(void)
{
return (__cpuid_has_avx512f() && __zmm_enabled());
}
/*
* Check if AVX512CD instruction set is available
*/
static inline boolean_t
zfs_avx512cd_available(void)
{
return (__cpuid_has_avx512cd() && __zmm_enabled());
}
/*
* Check if AVX512ER instruction set is available
*/
static inline boolean_t
zfs_avx512er_available(void)
{
return (__cpuid_has_avx512er() && __zmm_enabled());
}
/*
* Check if AVX512PF instruction set is available
*/
static inline boolean_t
zfs_avx512pf_available(void)
{
return (__cpuid_has_avx512pf() && __zmm_enabled());
}
/*
* Check if AVX512BW instruction set is available
*/
static inline boolean_t
zfs_avx512bw_available(void)
{
return (__cpuid_has_avx512bw() && __zmm_enabled());
}
/*
* Check if AVX512DQ instruction set is available
*/
static inline boolean_t
zfs_avx512dq_available(void)
{
return (__cpuid_has_avx512dq() && __zmm_enabled());
}
/*
* Check if AVX512VL instruction set is available
*/
static inline boolean_t
zfs_avx512vl_available(void)
{
return (__cpuid_has_avx512vl() && __zmm_enabled());
}
/*
* Check if AVX512IFMA instruction set is available
*/
static inline boolean_t
zfs_avx512ifma_available(void)
{
return (__cpuid_has_avx512ifma() && __zmm_enabled());
}
/*
* Check if AVX512VBMI instruction set is available
*/
static inline boolean_t
zfs_avx512vbmi_available(void)
{
return (__cpuid_has_avx512f() && __cpuid_has_avx512vbmi() &&
__zmm_enabled());
}
#elif defined(__aarch64__)
#define kfpu_allowed() 1
#define kfpu_initialize(tsk) do {} while (0)
#define kfpu_begin() do {} while (0)
#define kfpu_end() do {} while (0)
#elif defined(__powerpc__)
+/* including <sys/auxv.h> clashes with AT_UID and others */
+extern unsigned long getauxval(unsigned long type);
+#if defined(__FreeBSD__)
+#define AT_HWCAP 25 /* CPU feature flags. */
+#define AT_HWCAP2 26 /* CPU feature flags 2. */
+extern int elf_aux_info(int aux, void *buf, int buflen);
+static unsigned long getauxval(unsigned long key)
+{
+ unsigned long val = 0UL;
+
+ if (elf_aux_info((int)key, &val, sizeof (val)) != 0)
+ return (0UL);
+
+ return (val);
+}
+#elif defined(__linux__)
+#define AT_HWCAP 16 /* CPU feature flags. */
+#define AT_HWCAP2 26 /* CPU feature flags 2. */
+#endif
+
#define kfpu_allowed() 1
#define kfpu_initialize(tsk) do {} while (0)
#define kfpu_begin() do {} while (0)
#define kfpu_end() do {} while (0)
-/*
- * Check if AltiVec instruction set is available
- * No easy way beyond 'altivec works' :-(
- */
-#include <signal.h>
-#include <setjmp.h>
-
-#if defined(__ALTIVEC__) && !defined(__FreeBSD__)
-static jmp_buf env;
-static void sigillhandler(int x)
+#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
+static inline boolean_t
+zfs_altivec_available(void)
{
- (void) x;
- longjmp(env, 1);
+ unsigned long hwcap = getauxval(AT_HWCAP);
+
+ return (hwcap & PPC_FEATURE_HAS_ALTIVEC);
}
-#endif
+#define PPC_FEATURE_HAS_VSX 0x00000080
static inline boolean_t
-zfs_altivec_available(void)
+zfs_vsx_available(void)
{
- boolean_t has_altivec = B_FALSE;
-#if defined(__ALTIVEC__) && !defined(__FreeBSD__)
- sighandler_t savesig;
- savesig = signal(SIGILL, sigillhandler);
- if (setjmp(env)) {
- signal(SIGILL, savesig);
- has_altivec = B_FALSE;
- } else {
- __asm__ __volatile__("vor 0,0,0\n" : : : "v0");
- signal(SIGILL, savesig);
- has_altivec = B_TRUE;
- }
-#endif
- return (has_altivec);
+ unsigned long hwcap = getauxval(AT_HWCAP);
+
+ return (hwcap & PPC_FEATURE_HAS_VSX);
}
+
+#define PPC_FEATURE2_ARCH_2_07 0x80000000
static inline boolean_t
-zfs_vsx_available(void)
+zfs_isa207_available(void)
{
- boolean_t has_vsx = B_FALSE;
-#if defined(__ALTIVEC__) && !defined(__FreeBSD__)
- sighandler_t savesig;
- savesig = signal(SIGILL, sigillhandler);
- if (setjmp(env)) {
- signal(SIGILL, savesig);
- has_vsx = B_FALSE;
- } else {
- __asm__ __volatile__("xssubsp 0,0,0\n");
- signal(SIGILL, savesig);
- has_vsx = B_TRUE;
- }
-#endif
- return (has_vsx);
+ unsigned long hwcap = getauxval(AT_HWCAP);
+ unsigned long hwcap2 = getauxval(AT_HWCAP2);
+
+ return ((hwcap & PPC_FEATURE_HAS_VSX) &&
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07));
}
+
#else
#define kfpu_allowed() 0
#define kfpu_initialize(tsk) do {} while (0)
#define kfpu_begin() do {} while (0)
#define kfpu_end() do {} while (0)
#endif
#endif /* _LIBSPL_SYS_SIMD_H */
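Purely to illustrate the PowerPC change above, here is a standalone sketch of the HWCAP probing that replaces the old SIGILL/longjmp AltiVec test. The AT_HWCAP/AT_HWCAP2 numbers and PPC_FEATURE_* masks are copied from the patch; the #ifndef fallbacks are an assumption to keep the sketch self-contained (the patch itself avoids <sys/auxv.h> because of the AT_UID clash noted in its comment).

/* Sketch only: print the PowerPC vector capabilities the patch now keys off. */
#include <stdio.h>
#include <sys/auxv.h>	/* elf_aux_info() on FreeBSD, getauxval() on glibc */

#ifndef AT_HWCAP	/* fall back to the values used in the patch */
#if defined(__FreeBSD__)
#define	AT_HWCAP	25
#define	AT_HWCAP2	26
#else
#define	AT_HWCAP	16
#define	AT_HWCAP2	26
#endif
#endif

#define	PPC_FEATURE_HAS_ALTIVEC	0x10000000
#define	PPC_FEATURE_HAS_VSX	0x00000080
#define	PPC_FEATURE2_ARCH_2_07	0x80000000

static unsigned long
hwcap_get(unsigned long key)
{
#if defined(__FreeBSD__)
	unsigned long val = 0UL;

	/* elf_aux_info() copies the auxv value into val, as in the patch. */
	if (elf_aux_info((int)key, &val, sizeof (val)) != 0)
		return (0UL);
	return (val);
#else
	return (getauxval(key));
#endif
}

int
main(void)
{
	unsigned long hwcap = hwcap_get(AT_HWCAP);
	unsigned long hwcap2 = hwcap_get(AT_HWCAP2);

	(void) printf("altivec %d vsx %d isa2.07 %d\n",
	    (hwcap & PPC_FEATURE_HAS_ALTIVEC) != 0,
	    (hwcap & PPC_FEATURE_HAS_VSX) != 0,
	    (hwcap2 & PPC_FEATURE2_ARCH_2_07) != 0);
	return (0);
}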
diff --git a/sys/contrib/openzfs/lib/libuutil/Makefile.am b/sys/contrib/openzfs/lib/libuutil/Makefile.am
index 339f9a064745..b973ce3cca4c 100644
--- a/sys/contrib/openzfs/lib/libuutil/Makefile.am
+++ b/sys/contrib/openzfs/lib/libuutil/Makefile.am
@@ -1,29 +1,28 @@
libuutil_la_CFLAGS = $(AM_CFLAGS) $(LIBRARY_CFLAGS)
lib_LTLIBRARIES += libuutil.la
CPPCHECKTARGETS += libuutil.la
libuutil_la_SOURCES = \
%D%/uu_alloc.c \
%D%/uu_avl.c \
%D%/uu_ident.c \
%D%/uu_list.c \
%D%/uu_misc.c \
- %D%/uu_pname.c \
%D%/uu_string.c
libuutil_la_LIBADD = \
libavl.la \
libspl.la
libuutil_la_LIBADD += $(LTLIBINTL)
libuutil_la_LDFLAGS = -pthread
if !ASAN_ENABLED
libuutil_la_LDFLAGS += -Wl,-z,defs
endif
libuutil_la_LDFLAGS += -version-info 3:0:0
dist_noinst_DATA += %D%/libuutil.abi %D%/libuutil.suppr
diff --git a/sys/contrib/openzfs/lib/libuutil/libuutil.abi b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
index 766d8843000d..f5186a0837af 100644
--- a/sys/contrib/openzfs/lib/libuutil/libuutil.abi
+++ b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
@@ -1,1844 +1,1771 @@
<abi-corpus version='2.0' architecture='elf-amd-x86_64' soname='libuutil.so.3'>
<elf-needed>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_set_assert_ok' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_alt_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_lockup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_nearest_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_nearest_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_node_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_node_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_pool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_pool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_teardown' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_check_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_die' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_fatal' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_ok' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_usage' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_getpname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_lockup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_nearest_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_nearest_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_node_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_node_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_pool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_pool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_teardown' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_memdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_msprintf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_panic' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_set_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_setpname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strbw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strcaseeq' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_streq' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strerror' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strndup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vwarn' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vxdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_warn' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_xdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_zalloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='uu_exit_fatal_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_ok_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_usage_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
<abi-instr address-size='64' path='../../module/avl/avl.c' language='LANG_C99'>
<typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
<typedef-decl name='avl_index_t' type-id='e475ab95' id='fba6cb51'/>
<pointer-type-def type-id='fba6cb51' size-in-bits='64' id='32adbf30'/>
<pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
<function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='oldnode'/>
<parameter type-id='95e97e5e' name='left'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='fba6cb51' name='where'/>
<parameter type-id='95e97e5e' name='direction'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='value'/>
<parameter type-id='32adbf30' name='where'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_data'/>
<parameter type-id='fba6cb51' name='where'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_data'/>
<parameter type-id='eaa32e2f' name='here'/>
<parameter type-id='95e97e5e' name='direction'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_node'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
<parameter type-id='a3681dea' name='tree1'/>
<parameter type-id='a3681dea' name='tree2'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='585e1de9' name='compar'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='b59d7dce' name='offset'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='63e171df' name='cookie'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-type size-in-bits='64' id='96ee24a5'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='assert.c' language='LANG_C99'>
<function-decl name='libspl_set_assert_ok' mangled-name='libspl_set_assert_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_set_assert_ok'>
<parameter type-id='c19b74c3' name='val'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
<parameter type-id='80f4b756' name='file'/>
<parameter type-id='80f4b756' name='func'/>
<parameter type-id='95e97e5e' name='line'/>
<parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='atomic.c' language='LANG_C99'>
<type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
<typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
<typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
<typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
<typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
<typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
<typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
<typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
<typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
<qualified-type-def type-id='149c6638' volatile='yes' id='5120c5f7'/>
<pointer-type-def type-id='5120c5f7' size-in-bits='64' id='93977ae7'/>
<qualified-type-def type-id='8f92235e' volatile='yes' id='430e0681'/>
<pointer-type-def type-id='430e0681' size-in-bits='64' id='3a147f31'/>
<qualified-type-def type-id='b96825af' volatile='yes' id='84ff7d66'/>
<pointer-type-def type-id='84ff7d66' size-in-bits='64' id='aa323ea4'/>
<qualified-type-def type-id='ee1f298e' volatile='yes' id='6f7e09cb'/>
<pointer-type-def type-id='6f7e09cb' size-in-bits='64' id='64698d33'/>
<qualified-type-def type-id='48b5725f' volatile='yes' id='b0b3cbf9'/>
<pointer-type-def type-id='b0b3cbf9' size-in-bits='64' id='fe09dd29'/>
<function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
<parameter type-id='64698d33' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
<parameter type-id='64698d33' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='eaa32e2f' name='exp'/>
<parameter type-id='eaa32e2f' name='des'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='exp'/>
<parameter type-id='b96825af' name='des'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='exp'/>
<parameter type-id='149c6638' name='des'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='exp'/>
<parameter type-id='8f92235e' name='des'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='exp'/>
<parameter type-id='ee1f298e' name='des'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='eaa32e2f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='3502e3ff' name='value'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='3502e3ff' name='value'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
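<!--
The atomic_*() and membar_*() declarations above are the Solaris-style libspl atomic
primitives re-exported through libzpool. A minimal C sketch of how a consumer might use
them; the <atomic.h> header location (sys/contrib/openzfs/lib/libspl/include) is an
assumption about the build's include path, not something stated by this ABI file:

  #include <stdio.h>
  #include <stdint.h>
  #include <atomic.h>          /* libspl: atomic_add_32_nv(), atomic_cas_32(), ... */

  static volatile uint32_t refcnt = 0;

  int
  main(void)
  {
          /* Atomically bump the counter and read back the new value. */
          uint32_t nv = atomic_add_32_nv(&refcnt, 1);

          /* Compare-and-swap: store 10 only if the counter still equals nv. */
          uint32_t old = atomic_cas_32(&refcnt, nv, 10);

          /* Publish prior stores before other threads observe the update. */
          membar_producer();

          printf("new=%u old=%u now=%u\n", nv, old, refcnt);
          return (0);
  }
-->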
<abi-instr address-size='64' path='getexecname.c' language='LANG_C99'>
<function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
<return type-id='80f4b756'/>
</function-decl>
</abi-instr>
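<!--
getexecname() mirrors the Solaris libc routine: it returns the path name of the calling
executable, or NULL if it cannot be determined. A hedged one-liner, assuming the libspl
compatibility <stdlib.h> provides the prototype:

  #include <stdio.h>
  #include <stdlib.h>          /* assumed home of the getexecname() prototype */

  int
  main(void)
  {
          const char *exe = getexecname();
          printf("running as: %s\n", exe != NULL ? exe : "(unknown)");
          return (0);
  }
-->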
<abi-instr address-size='64' path='list.c' language='LANG_C99'>
<typedef-decl name='list_node_t' type-id='b0b5e45e' id='b21843b2'/>
<typedef-decl name='list_t' type-id='e824dae9' id='0899125f'/>
<class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='b0b5e45e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='next' type-id='b03eadb4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='prev' type-id='b03eadb4' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='e824dae9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='list_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='list_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='list_head' type-id='b0b5e45e' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='b0b5e45e' size-in-bits='64' id='b03eadb4'/>
<pointer-type-def type-id='b21843b2' size-in-bits='64' id='ccc38265'/>
<pointer-type-def type-id='0899125f' size-in-bits='64' id='352ec160'/>
<function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='b59d7dce' name='offset'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
<parameter type-id='352ec160' name='list'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<parameter type-id='eaa32e2f' name='nobject'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<parameter type-id='eaa32e2f' name='nobject'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
<parameter type-id='352ec160' name='dst'/>
<parameter type-id='352ec160' name='src'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
<parameter type-id='ccc38265' name='lold'/>
<parameter type-id='ccc38265' name='lnew'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
<parameter type-id='ccc38265' name='ln'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
<parameter type-id='ccc38265' name='ln'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
<parameter type-id='352ec160' name='list'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
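<!--
The list_*() declarations above are the kernel-style doubly linked list helpers built
for userland. They use the embedded-node pattern: a list_node_t lives inside the caller's
structure and list_create() is told its offset. A brief C sketch, assuming the libspl
<sys/list.h> header:

  #include <stddef.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/list.h>

  typedef struct item {
          int          i_value;
          list_node_t  i_node;     /* embedded linkage used by the list */
  } item_t;

  int
  main(void)
  {
          list_t l;
          list_create(&l, sizeof (item_t), offsetof(item_t, i_node));

          for (int v = 0; v < 3; v++) {
                  item_t *it = calloc(1, sizeof (*it));
                  it->i_value = v;
                  list_insert_tail(&l, it);
          }

          /* Walk in insertion order. */
          for (item_t *it = list_head(&l); it != NULL; it = list_next(&l, it))
                  printf("%d\n", it->i_value);

          item_t *it;
          while ((it = list_remove_head(&l)) != NULL)
                  free(it);
          list_destroy(&l);
          return (0);
  }
-->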
<abi-instr address-size='64' path='mkdirp.c' language='LANG_C99'>
<typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
<typedef-decl name='mode_t' type-id='e1c52942' id='d50d396c'/>
<function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
<parameter type-id='80f4b756' name='d'/>
<parameter type-id='d50d396c' name='mode'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
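<!--
mkdirp() is the Solaris-compatibility helper that creates a directory together with any
missing parent components, returning 0 on success and -1 (with errno set) on failure.
A hedged sketch; the <libgen.h> prototype location follows the illumos convention used
by libspl and is an assumption here:

  #include <stdio.h>
  #include <errno.h>
  #include <string.h>
  #include <libgen.h>          /* assumed home of the mkdirp() prototype */

  int
  main(void)
  {
          if (mkdirp("/tmp/example/a/b/c", 0755) != 0 && errno != EEXIST) {
                  fprintf(stderr, "mkdirp: %s\n", strerror(errno));
                  return (1);
          }
          return (0);
  }
-->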
<abi-instr address-size='64' path='os/linux/gethostid.c' language='LANG_C99'>
<function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
<return type-id='7359adad'/>
</function-decl>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
</abi-instr>
<abi-instr address-size='64' path='os/linux/getmntany.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
<subrange length='20' type-id='7359adad' id='fdca39cf'/>
</array-type-def>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='0c544dc0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='mnt_major' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='0bbec9cd'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='st_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='st_blocks' type-id='4e711bf1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__dev_t' type-id='7359adad' id='35ed8932'/>
<typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
<typedef-decl name='__gid_t' type-id='f0981eeb' id='d94ec6d9'/>
<typedef-decl name='__ino64_t' type-id='7359adad' id='71288a47'/>
<typedef-decl name='__nlink_t' type-id='7359adad' id='80f0b9df'/>
<typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
<typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
<typedef-decl name='__time_t' type-id='bd54fe1a' id='65eda9c0'/>
<typedef-decl name='__blksize_t' type-id='bd54fe1a' id='d3f10a7f'/>
<typedef-decl name='__blkcnt64_t' type-id='bd54fe1a' id='4e711bf1'/>
<typedef-decl name='__syscall_slong_t' type-id='bd54fe1a' id='03085adc'/>
<typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
<typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
<class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
<var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
<var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
<var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
<var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='_codecvt' type-id='570f8c59' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
<var-decl name='_wide_data' type-id='c65a1f29' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
<var-decl name='_freeres_list' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
<var-decl name='_freeres_buf' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
<var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
<var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
<var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='a9c79a1f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tv_sec' type-id='65eda9c0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tv_nsec' type-id='03085adc' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
<pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
<pointer-type-def type-id='a4036571' size-in-bits='64' id='570f8c59'/>
<pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
<pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
<pointer-type-def type-id='79bd3751' size-in-bits='64' id='c65a1f29'/>
<pointer-type-def type-id='0c544dc0' size-in-bits='64' id='394fc496'/>
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
<parameter type-id='822cd80b' name='fp'/>
<parameter type-id='9d424d31' name='mgetp'/>
<parameter type-id='9d424d31' name='mrefp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
<parameter type-id='822cd80b' name='fp'/>
<parameter type-id='9d424d31' name='mgetp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='394fc496' name='entry'/>
<parameter type-id='62f7a03d' name='statbuf'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
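<!--
getmntany(), _sol_getmntent() and getextmntent() expose the Solaris mnttab view of the
mount table. A hedged sketch of the getextmntent() call declared above, which fills both
an extmnttab entry and a stat64 buffer for the mount containing 'path'; the libspl
<sys/mnttab.h> header and a glibc largefile environment are assumptions:

  #define _GNU_SOURCE          /* struct stat64 on glibc */
  #include <stdio.h>
  #include <sys/stat.h>
  #include <sys/mnttab.h>

  int
  main(int argc, char **argv)
  {
          struct extmnttab mnt;
          struct stat64 st;
          const char *path = argc > 1 ? argv[1] : "/";

          if (getextmntent(path, &mnt, &st) != 0) {
                  fprintf(stderr, "no mount entry for %s\n", path);
                  return (1);
          }
          printf("%s is %s mounted on %s (dev %u/%u)\n", path,
              mnt.mnt_fstype, mnt.mnt_mountp, mnt.mnt_major, mnt.mnt_minor);
          return (0);
  }
-->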
<abi-instr address-size='64' path='os/linux/zone.c' language='LANG_C99'>
<typedef-decl name='zoneid_t' type-id='3502e3ff' id='4da03624'/>
<function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
<return type-id='4da03624'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='page.c' language='LANG_C99'>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='strlcat.c' language='LANG_C99'>
<function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
<parameter type-id='26a90f95' name='dst'/>
<parameter type-id='80f4b756' name='src'/>
<parameter type-id='b59d7dce' name='dstsize'/>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='strlcpy.c' language='LANG_C99'>
<function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
<parameter type-id='26a90f95' name='dst'/>
<parameter type-id='80f4b756' name='src'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
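<!--
strlcpy() and strlcat() are the usual size-bounded string routines: both take the full
size of the destination buffer and return the total length of the string they tried to
create, so truncation is detected by comparing the return value against the buffer size.
For example (they are native in FreeBSD libc's <string.h>; elsewhere these libspl
fallbacks provide them):

  #include <stdio.h>
  #include <string.h>

  int
  main(void)
  {
          char buf[16];

          size_t n = strlcpy(buf, "dataset/", sizeof (buf));
          n = strlcat(buf, "home/user", sizeof (buf));

          if (n >= sizeof (buf))
                  printf("truncated: wanted %zu bytes, buf=\"%s\"\n", n + 1, buf);
          else
                  printf("ok: \"%s\"\n", buf);
          return (0);
  }
-->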
<abi-instr address-size='64' path='timestamp.c' language='LANG_C99'>
<function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='uu_alloc.c' language='LANG_C99'>
<type-decl name='char' size-in-bits='8' id='a84c031d'/>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
<type-decl name='variadic parameter type' id='2c1145c5'/>
<type-decl name='void' id='48b5725f'/>
<typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
<pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
<qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
<pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
<pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
<function-decl name='uu_zalloc' mangled-name='uu_zalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_zalloc'>
<parameter type-id='b59d7dce' name='n'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_free' mangled-name='uu_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_free'>
<parameter type-id='eaa32e2f' name='p'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_strdup' mangled-name='uu_strdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strdup'>
<parameter type-id='80f4b756' name='str'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='uu_strndup' mangled-name='uu_strndup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strndup'>
<parameter type-id='80f4b756' name='s'/>
<parameter type-id='b59d7dce' name='n'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='uu_memdup' mangled-name='uu_memdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_memdup'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='sz'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_msprintf' mangled-name='uu_msprintf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_msprintf'>
<parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='26a90f95'/>
</function-decl>
</abi-instr>
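<!--
The uu_*() allocation helpers above come from libuutil: uu_zalloc() returns zeroed
memory, uu_strdup()/uu_msprintf() return heap-allocated strings, and everything is
released with uu_free(). A brief sketch, assuming <libuutil.h> and linking with -luutil:

  #include <stdio.h>
  #include <libuutil.h>

  int
  main(void)
  {
          char *label = uu_msprintf("pool-%d", 42);    /* printf-style allocation */
          char *copy = uu_strdup(label);
          int *counters = uu_zalloc(8 * sizeof (int)); /* zero-filled block */

          if (label == NULL || copy == NULL || counters == NULL)
                  return (1);
          printf("%s / %s / counters[0]=%d\n", label, copy, counters[0]);

          uu_free(counters);
          uu_free(copy);
          uu_free(label);
          return (0);
  }
-->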
<abi-instr address-size='64' path='uu_avl.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
<subrange length='40' type-id='7359adad' id='8f80b239'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='512' id='59daf3ef'>
<subrange length='64' type-id='7359adad' id='b10be967'/>
</array-type-def>
<type-decl name='int' size-in-bits='32' id='95e97e5e'/>
<type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
<type-decl name='short int' size-in-bits='16' id='a2185560'/>
<type-decl name='signed char' size-in-bits='8' id='28577a57'/>
<array-type-def dimensions='1' type-id='e475ab95' size-in-bits='192' id='0ce65a8b'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
<type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
<typedef-decl name='uu_compare_fn_t' type-id='add6e811' id='40f93560'/>
<typedef-decl name='uu_walk_fn_t' type-id='96ee24a5' id='9d1aa0dc'/>
<typedef-decl name='uu_avl_pool_t' type-id='12a530a8' id='7f84e390'/>
<typedef-decl name='uu_avl_t' type-id='4af029d1' id='bb7f0973'/>
<class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='f65f4326'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uan_opaque' type-id='0ce65a8b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='uu_avl_node_t' type-id='f65f4326' id='73a65116'/>
<typedef-decl name='uu_avl_walk_t' type-id='e70a39e3' id='edd8457b'/>
<typedef-decl name='uu_avl_index_t' type-id='e475ab95' id='5d7f5fc8'/>
<class-decl name='uu_avl_walk' size-in-bits='320' is-struct='yes' visibility='default' id='e70a39e3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uaw_next' type-id='5842d146' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='uaw_prev' type-id='5842d146' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='uaw_avl' type-id='a5c21a38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='uaw_next_result' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='uaw_dir' type-id='ee31ee44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='264'>
<var-decl name='uaw_robust' type-id='b96825af' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uu_avl' size-in-bits='960' is-struct='yes' visibility='default' id='4af029d1'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ua_next_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='ua_prev_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='ua_pool' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='ua_parent_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='ua_debug' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='264'>
<var-decl name='ua_index' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='ua_tree' type-id='b351119f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='ua_null_walk' type-id='edd8457b' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uu_avl_pool' size-in-bits='2176' is-struct='yes' visibility='default' id='12a530a8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uap_next' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='uap_prev' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='uap_name' type-id='59daf3ef' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='uap_nodeoffset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='uap_objsize' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='uap_cmp' type-id='d502b39f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='uap_debug' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='840'>
<var-decl name='uap_last_index' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='uap_lock' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='uap_null_avl' type-id='bb7f0973' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_root' type-id='bf311473' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
<typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
<union-decl name='pthread_mutex_t' size-in-bits='320' naming-typedef-id='7a6844eb' visibility='default' id='70681f9b'>
<data-member access='public'>
<var-decl name='__data' type-id='4c734837' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__size' type-id='36c46961' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_mutex_t' type-id='70681f9b' id='7a6844eb'/>
<typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
<typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
<typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
<class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='__spins' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
<var-decl name='__elision' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
<typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
<typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
<typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
<pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
<pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
<pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
<pointer-type-def type-id='5d7f5fc8' size-in-bits='64' id='813a2225'/>
<pointer-type-def type-id='73a65116' size-in-bits='64' id='2dc35b9d'/>
<pointer-type-def type-id='7f84e390' size-in-bits='64' id='de82c773'/>
<pointer-type-def type-id='bb7f0973' size-in-bits='64' id='a5c21a38'/>
<pointer-type-def type-id='edd8457b' size-in-bits='64' id='5842d146'/>
<pointer-type-def type-id='40f93560' size-in-bits='64' id='d502b39f'/>
<pointer-type-def type-id='9d1aa0dc' size-in-bits='64' id='30a42b6d'/>
<pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
<function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_create'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='b59d7dce' name='objsize'/>
<parameter type-id='b59d7dce' name='nodeoffset'/>
<parameter type-id='d502b39f' name='compare_func'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='de82c773'/>
</function-decl>
<function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_destroy'>
<parameter type-id='de82c773' name='pp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_init'>
<parameter type-id='eaa32e2f' name='base'/>
<parameter type-id='2dc35b9d' name='np'/>
<parameter type-id='de82c773' name='pp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_node_fini' mangled-name='uu_avl_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_fini'>
<parameter type-id='eaa32e2f' name='base'/>
<parameter type-id='2dc35b9d' name='np'/>
<parameter type-id='de82c773' name='pp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_create' mangled-name='uu_avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_create'>
<parameter type-id='de82c773' name='pp'/>
<parameter type-id='eaa32e2f' name='parent'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='a5c21a38'/>
</function-decl>
<function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_destroy'>
<parameter type-id='a5c21a38' name='ap'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_numnodes' mangled-name='uu_avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_numnodes'>
<parameter type-id='a5c21a38' name='ap'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='uu_avl_first' mangled-name='uu_avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_first'>
<parameter type-id='a5c21a38' name='ap'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_last' mangled-name='uu_avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_last'>
<parameter type-id='a5c21a38' name='ap'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_next' mangled-name='uu_avl_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_next'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='eaa32e2f' name='node'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_prev' mangled-name='uu_avl_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_prev'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='eaa32e2f' name='node'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_start'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='5842d146'/>
</function-decl>
<function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_next'>
<parameter type-id='5842d146' name='wp'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_end'>
<parameter type-id='5842d146' name='wp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_walk' mangled-name='uu_avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='30a42b6d' name='func'/>
<parameter type-id='eaa32e2f' name='private'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_remove'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='eaa32e2f' name='elem'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_teardown'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='63e171df' name='cookie'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_find' mangled-name='uu_avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_find'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='eaa32e2f' name='elem'/>
<parameter type-id='eaa32e2f' name='private'/>
<parameter type-id='813a2225' name='out'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_insert'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='eaa32e2f' name='elem'/>
<parameter type-id='5d7f5fc8' name='idx'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_nearest_next' mangled-name='uu_avl_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_next'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='5d7f5fc8' name='idx'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_nearest_prev' mangled-name='uu_avl_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_prev'>
<parameter type-id='a5c21a38' name='ap'/>
<parameter type-id='5d7f5fc8' name='idx'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_lockup' mangled-name='uu_avl_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_lockup'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_release' mangled-name='uu_avl_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_release'>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='add6e811'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
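<!--
The uu_avl_*() declarations describe libuutil's AVL-tree wrapper: a pool records the
object size, the offset of the embedded uu_avl_node_t and a comparison callback, and
each tree created from the pool keeps objects sorted by that callback. A hedged usage
sketch, assuming <libuutil.h>, -luutil, the UU_DEFAULT flag, and the usual
int (const void *, const void *, void *) shape for uu_compare_fn_t:

  #include <stddef.h>
  #include <stdio.h>
  #include <libuutil.h>

  typedef struct entry {
          int            e_key;
          uu_avl_node_t  e_node;     /* embedded AVL linkage */
  } entry_t;

  static int
  entry_cmp(const void *a, const void *b, void *arg)
  {
          (void) arg;
          int ka = ((const entry_t *)a)->e_key, kb = ((const entry_t *)b)->e_key;
          return (ka < kb ? -1 : ka > kb ? 1 : 0);
  }

  int
  main(void)
  {
          uu_avl_pool_t *pool = uu_avl_pool_create("entries", sizeof (entry_t),
              offsetof(entry_t, e_node), entry_cmp, UU_DEFAULT);
          uu_avl_t *avl = uu_avl_create(pool, NULL, UU_DEFAULT);

          entry_t e = { .e_key = 7 };
          uu_avl_node_init(&e, &e.e_node, pool);

          /* Find the insertion point, then insert if the key is not present. */
          uu_avl_index_t idx;
          if (uu_avl_find(avl, &e, NULL, &idx) == NULL)
                  uu_avl_insert(avl, &e, idx);

          for (entry_t *p = uu_avl_first(avl); p != NULL; p = uu_avl_next(avl, p))
                  printf("key=%d\n", p->e_key);

          uu_avl_remove(avl, &e);
          uu_avl_node_fini(&e, &e.e_node, pool);
          uu_avl_destroy(avl);
          uu_avl_pool_destroy(pool);
          return (0);
  }
-->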
<abi-instr address-size='64' path='uu_ident.c' language='LANG_C99'>
<typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
<function-decl name='uu_check_name' mangled-name='uu_check_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_check_name'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='3502e3ff' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
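<!--
uu_check_name() validates an identifier against libuutil's naming rules, returning 0
when the name is acceptable and -1 otherwise; the flags argument widens the accepted
character set. A hedged one-liner; the UU_NAME_DOMAIN flag (which permits dots) is
assumed to come from <libuutil.h>:

  #include <stdio.h>
  #include <libuutil.h>

  int
  main(void)
  {
          const char *name = "svc.example";
          printf("%s is %s\n", name,
              uu_check_name(name, UU_NAME_DOMAIN) == 0 ? "valid" : "invalid");
          return (0);
  }
-->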
<abi-instr address-size='64' path='uu_list.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='e475ab95' size-in-bits='128' id='d0e9cdae'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<typedef-decl name='uu_list_pool_t' type-id='55168cab' id='38a2549d'/>
<typedef-decl name='uu_list_t' type-id='1d04bdf0' id='82e88484'/>
<class-decl name='uu_list_node' size-in-bits='128' is-struct='yes' visibility='default' id='f8f3cec5'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uln_opaque' type-id='d0e9cdae' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='uu_list_node_t' type-id='f8f3cec5' id='c4dc472f'/>
<typedef-decl name='uu_list_walk_t' type-id='b80e3208' id='9fed32d2'/>
<typedef-decl name='uu_list_index_t' type-id='e475ab95' id='f0dd35ff'/>
<class-decl name='uu_list_node_impl' size-in-bits='128' is-struct='yes' visibility='default' id='700a795c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uln_next' type-id='5af1298a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='uln_prev' type-id='5af1298a' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='uu_list_node_impl_t' type-id='700a795c' id='8e5864b0'/>
<class-decl name='uu_list_walk' size-in-bits='320' is-struct='yes' visibility='default' id='b80e3208'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ulw_next' type-id='4d848103' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='ulw_prev' type-id='4d848103' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='ulw_list' type-id='0c0b229b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='ulw_dir' type-id='ee31ee44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
<var-decl name='ulw_robust' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='ulw_next_result' type-id='a085247f' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uu_list' size-in-bits='896' is-struct='yes' visibility='default' id='1d04bdf0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ul_next_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='ul_prev_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='ul_pool' type-id='0941e04e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='ul_parent_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='ul_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='ul_numnodes' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='ul_debug' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='392'>
<var-decl name='ul_sorted' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='400'>
<var-decl name='ul_index' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='ul_null_node' type-id='8e5864b0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='ul_null_walk' type-id='9fed32d2' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uu_list_pool' size-in-bits='2112' is-struct='yes' visibility='default' id='55168cab'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ulp_next' type-id='0941e04e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='ulp_prev' type-id='0941e04e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='ulp_name' type-id='59daf3ef' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='ulp_nodeoffset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='ulp_objsize' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='ulp_cmp' type-id='d502b39f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='ulp_debug' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='840'>
<var-decl name='ulp_last_index' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='ulp_lock' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='ulp_null_list' type-id='82e88484' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='f0dd35ff' size-in-bits='64' id='ecbc0046'/>
<pointer-type-def type-id='700a795c' size-in-bits='64' id='5af1298a'/>
<pointer-type-def type-id='8e5864b0' size-in-bits='64' id='a085247f'/>
<pointer-type-def type-id='c4dc472f' size-in-bits='64' id='dbe143f4'/>
<pointer-type-def type-id='38a2549d' size-in-bits='64' id='0941e04e'/>
<pointer-type-def type-id='82e88484' size-in-bits='64' id='0c0b229b'/>
<pointer-type-def type-id='9fed32d2' size-in-bits='64' id='4d848103'/>
<function-decl name='uu_list_pool_create' mangled-name='uu_list_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_create'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='b59d7dce' name='objsize'/>
<parameter type-id='b59d7dce' name='nodeoffset'/>
<parameter type-id='d502b39f' name='compare_func'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='0941e04e'/>
</function-decl>
<function-decl name='uu_list_pool_destroy' mangled-name='uu_list_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_destroy'>
<parameter type-id='0941e04e' name='pp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_node_init' mangled-name='uu_list_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_init'>
<parameter type-id='eaa32e2f' name='base'/>
<parameter type-id='dbe143f4' name='np_arg'/>
<parameter type-id='0941e04e' name='pp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_node_fini' mangled-name='uu_list_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_fini'>
<parameter type-id='eaa32e2f' name='base'/>
<parameter type-id='dbe143f4' name='np_arg'/>
<parameter type-id='0941e04e' name='pp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_create' mangled-name='uu_list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_create'>
<parameter type-id='0941e04e' name='pp'/>
<parameter type-id='eaa32e2f' name='parent'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='0c0b229b'/>
</function-decl>
<function-decl name='uu_list_destroy' mangled-name='uu_list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_destroy'>
<parameter type-id='0c0b229b' name='lp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_insert' mangled-name='uu_list_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='elem'/>
<parameter type-id='f0dd35ff' name='idx'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_find' mangled-name='uu_list_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_find'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='elem'/>
<parameter type-id='eaa32e2f' name='private'/>
<parameter type-id='ecbc0046' name='out'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_nearest_next' mangled-name='uu_list_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_next'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='f0dd35ff' name='idx'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_nearest_prev' mangled-name='uu_list_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_prev'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='f0dd35ff' name='idx'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_walk_start' mangled-name='uu_list_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_start'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='4d848103'/>
</function-decl>
<function-decl name='uu_list_walk_next' mangled-name='uu_list_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_next'>
<parameter type-id='4d848103' name='wp'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_walk_end' mangled-name='uu_list_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_end'>
<parameter type-id='4d848103' name='wp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_walk' mangled-name='uu_list_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='30a42b6d' name='func'/>
<parameter type-id='eaa32e2f' name='private'/>
<parameter type-id='8f92235e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='uu_list_remove' mangled-name='uu_list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_remove'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='elem'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_teardown' mangled-name='uu_list_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_teardown'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='63e171df' name='cookie'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_insert_before' mangled-name='uu_list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_before'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='target'/>
<parameter type-id='eaa32e2f' name='elem'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='uu_list_insert_after' mangled-name='uu_list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_after'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='target'/>
<parameter type-id='eaa32e2f' name='elem'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='uu_list_numnodes' mangled-name='uu_list_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_numnodes'>
<parameter type-id='0c0b229b' name='lp'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='uu_list_first' mangled-name='uu_list_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_first'>
<parameter type-id='0c0b229b' name='lp'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_last' mangled-name='uu_list_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_last'>
<parameter type-id='0c0b229b' name='lp'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_next' mangled-name='uu_list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_next'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='elem'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_prev' mangled-name='uu_list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_prev'>
<parameter type-id='0c0b229b' name='lp'/>
<parameter type-id='eaa32e2f' name='elem'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_list_lockup' mangled-name='uu_list_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_lockup'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_list_release' mangled-name='uu_list_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_release'>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='uu_misc.c' language='LANG_C99'>
<function-decl name='uu_set_error' mangled-name='uu_set_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_set_error'>
<parameter type-id='3502e3ff' name='code'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_error' mangled-name='uu_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_error'>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='uu_strerror' mangled-name='uu_strerror' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strerror'>
<parameter type-id='8f92235e' name='code'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='uu_panic' mangled-name='uu_panic' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_panic'>
<parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
- <abi-instr address-size='64' path='uu_pname.c' language='LANG_C99'>
- <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='d5027220'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gp_offset' type-id='f0981eeb' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='fp_offset' type-id='f0981eeb' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='overflow_arg_area' type-id='eaa32e2f' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='reg_save_area' type-id='eaa32e2f' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='d5027220' size-in-bits='64' id='b7f2d5e6'/>
- <pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
- <var-decl name='uu_exit_ok_value' type-id='95e97e5e' mangled-name='uu_exit_ok_value' visibility='default' elf-symbol-id='uu_exit_ok_value'/>
- <var-decl name='uu_exit_fatal_value' type-id='95e97e5e' mangled-name='uu_exit_fatal_value' visibility='default' elf-symbol-id='uu_exit_fatal_value'/>
- <var-decl name='uu_exit_usage_value' type-id='95e97e5e' mangled-name='uu_exit_usage_value' visibility='default' elf-symbol-id='uu_exit_usage_value'/>
- <function-decl name='uu_exit_ok' mangled-name='uu_exit_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_ok'>
- <return type-id='7292109c'/>
- </function-decl>
- <function-decl name='uu_exit_fatal' mangled-name='uu_exit_fatal' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_fatal'>
- <return type-id='7292109c'/>
- </function-decl>
- <function-decl name='uu_exit_usage' mangled-name='uu_exit_usage' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_usage'>
- <return type-id='7292109c'/>
- </function-decl>
- <function-decl name='uu_alt_exit' mangled-name='uu_alt_exit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_alt_exit'>
- <parameter type-id='95e97e5e' name='profile'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_vwarn' mangled-name='uu_vwarn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vwarn'>
- <parameter type-id='80f4b756' name='format'/>
- <parameter type-id='b7f2d5e6' name='alist'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_warn' mangled-name='uu_warn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_warn'>
- <parameter type-id='80f4b756' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_vdie' mangled-name='uu_vdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vdie'>
- <parameter type-id='80f4b756' name='format'/>
- <parameter type-id='b7f2d5e6' name='alist'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_die' mangled-name='uu_die' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_die'>
- <parameter type-id='80f4b756' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_vxdie' mangled-name='uu_vxdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vxdie'>
- <parameter type-id='95e97e5e' name='status'/>
- <parameter type-id='80f4b756' name='format'/>
- <parameter type-id='b7f2d5e6' name='alist'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_xdie' mangled-name='uu_xdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_xdie'>
- <parameter type-id='95e97e5e' name='status'/>
- <parameter type-id='80f4b756' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='48b5725f'/>
- </function-decl>
- <function-decl name='uu_setpname' mangled-name='uu_setpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_setpname'>
- <parameter type-id='26a90f95' name='arg0'/>
- <return type-id='80f4b756'/>
- </function-decl>
- <function-decl name='uu_getpname' mangled-name='uu_getpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_getpname'>
- <return type-id='80f4b756'/>
- </function-decl>
- </abi-instr>
<abi-instr address-size='64' path='uu_string.c' language='LANG_C99'>
<type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
<enum-decl name='boolean_t' naming-typedef-id='c19b74c3' id='f58c8277'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
<typedef-decl name='boolean_t' type-id='f58c8277' id='c19b74c3'/>
<function-decl name='uu_streq' mangled-name='uu_streq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_streq'>
<parameter type-id='80f4b756' name='a'/>
<parameter type-id='80f4b756' name='b'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='uu_strcaseeq' mangled-name='uu_strcaseeq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strcaseeq'>
<parameter type-id='80f4b756' name='a'/>
<parameter type-id='80f4b756' name='b'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='uu_strbw' mangled-name='uu_strbw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strbw'>
<parameter type-id='80f4b756' name='a'/>
<parameter type-id='80f4b756' name='b'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libuutil/uu_pname.c b/sys/contrib/openzfs/lib/libuutil/uu_pname.c
deleted file mode 100644
index 37c093731ef9..000000000000
--- a/sys/contrib/openzfs/lib/libuutil/uu_pname.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or https://opensource.org/licenses/CDDL-1.0.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-
-
-#include "libuutil_common.h"
-
-#include <libintl.h>
-#include <limits.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <errno.h>
-#include <wchar.h>
-#include <unistd.h>
-
-static const char *pname;
-
-static __attribute__((noreturn)) void
-uu_die_internal(int status, const char *format, va_list alist);
-
-int uu_exit_ok_value = EXIT_SUCCESS;
-int uu_exit_fatal_value = EXIT_FAILURE;
-int uu_exit_usage_value = 2;
-
-int *
-uu_exit_ok(void)
-{
- return (&uu_exit_ok_value);
-}
-
-int *
-uu_exit_fatal(void)
-{
- return (&uu_exit_fatal_value);
-}
-
-int *
-uu_exit_usage(void)
-{
- return (&uu_exit_usage_value);
-}
-
-void
-uu_alt_exit(int profile)
-{
- switch (profile) {
- case UU_PROFILE_DEFAULT:
- uu_exit_ok_value = EXIT_SUCCESS;
- uu_exit_fatal_value = EXIT_FAILURE;
- uu_exit_usage_value = 2;
- break;
- case UU_PROFILE_LAUNCHER:
- uu_exit_ok_value = EXIT_SUCCESS;
- uu_exit_fatal_value = 124;
- uu_exit_usage_value = 125;
- break;
- }
-}
-
-static __attribute__((format(printf, 2, 0))) void
-uu_warn_internal(int err, const char *format, va_list alist)
-{
- if (pname != NULL)
- (void) fprintf(stderr, "%s: ", pname);
-
- if (format != NULL)
- (void) vfprintf(stderr, format, alist);
-
- if (strrchr(format, '\n') == NULL)
- (void) fprintf(stderr, ": %s\n", strerror(err));
-}
-
-void
-uu_vwarn(const char *format, va_list alist)
-{
- uu_warn_internal(errno, format, alist);
-}
-
-void
-uu_warn(const char *format, ...)
-{
- va_list alist;
- va_start(alist, format);
- uu_warn_internal(errno, format, alist);
- va_end(alist);
-}
-
-static __attribute__((format(printf, 2, 0))) __attribute__((noreturn)) void
-uu_die_internal(int status, const char *format, va_list alist)
-{
- uu_warn_internal(errno, format, alist);
-#ifdef DEBUG
- {
- char *cp;
-
- if (!issetugid()) {
- cp = getenv("UU_DIE_ABORTS");
- if (cp != NULL && *cp != '\0')
- abort();
- }
- }
-#endif
- exit(status);
-}
-
-void
-uu_vdie(const char *format, va_list alist)
-{
- uu_die_internal(UU_EXIT_FATAL, format, alist);
-}
-
-void
-uu_die(const char *format, ...)
-{
- va_list alist;
- va_start(alist, format);
- uu_die_internal(UU_EXIT_FATAL, format, alist);
- va_end(alist);
-}
-
-void
-uu_vxdie(int status, const char *format, va_list alist)
-{
- uu_die_internal(status, format, alist);
-}
-
-void
-uu_xdie(int status, const char *format, ...)
-{
- va_list alist;
- va_start(alist, format);
- uu_die_internal(status, format, alist);
- va_end(alist);
-}
-
-const char *
-uu_setpname(char *arg0)
-{
- /*
- * Having a NULL argv[0], while uncommon, is possible. It
- * makes more sense to handle this event in uu_setpname rather
- * than in each of its consumers.
- */
- if (arg0 == NULL) {
- pname = getexecname();
- if (pname == NULL)
- pname = "unknown_command";
- return (pname);
- }
-
- /*
- * Guard against '/' at end of command invocation.
- */
- for (;;) {
- char *p = strrchr(arg0, '/');
- if (p == NULL) {
- pname = arg0;
- break;
- } else {
- if (*(p + 1) == '\0') {
- *p = '\0';
- continue;
- }
-
- pname = p + 1;
- break;
- }
- }
-
- return (pname);
-}
-
-const char *
-uu_getpname(void)
-{
- return (pname);
-}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index 0494aec208e5..7dd12df81718 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -1,5829 +1,5852 @@
<abi-corpus version='2.0' architecture='elf-amd-x86_64' soname='libzfs.so.4'>
<elf-needed>
<dependency name='libzfs_core.so.3'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libuuid.so.1'/>
<dependency name='librt.so.1'/>
<dependency name='libblkid.so.1'/>
<dependency name='libudev.so.1'/>
<dependency name='libuutil.so.3'/>
<dependency name='libm.so.6'/>
<dependency name='libcrypto.so.1.1'/>
<dependency name='libz.so.1'/>
<dependency name='libdl.so.2'/>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='bookmark_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='cityhash4' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_nestcheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_read' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_err_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_rescan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_use_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_write' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='entity_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_impl_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native_varsize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_dataset_depth' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getprop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mpath_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_set_assert_ok' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_add_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_envvar_is_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_errno' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_action' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_description' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_free_str_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_cache' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_print_on_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout_nopath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mountpoint_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='permset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='pool_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='printf_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_disable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_enable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_errorstr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='sa_truncate_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_validate_shareopts' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='snapshot_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_abandon' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_dispatch' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_member' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspend' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspended' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='update_vdev_config_dev_strs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_depends_on' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_valid_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_adjust_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_allocatable_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_append_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_basename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_bookmark_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_component_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create_ancestors' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_attempt_load_keys' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_clone_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_get_encryption_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_rewrap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_name_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_canonicalize_perm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_verify_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_whokey' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_flush' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_dm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_devid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_physical' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dirnamelen' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_foreach_mountpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_all_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_clones_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_enclosure_sysfs_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_recvd_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_user_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_handle_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_isnumber' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_delegation_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_valid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicebytes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum_format' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_niceraw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicestrtonum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicetime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parent_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parse_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_path_to_zhandle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_delegatable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_encryption_key_param' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_recvd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inherit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inheritable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_is_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_keylocation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_visible' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prune_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_refresh_properties' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_resolve_shortname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_save_arguments' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_progress' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume_token_to_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_saved' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_set_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_show_diffs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_purge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_special_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_standard_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strcmp_pathname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zfs_truncate_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_type_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmountall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userns' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userspace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_valid_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_kernel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_userland' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_zpl_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_default_search_paths' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_volume_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_discard_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_dump_ddt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_enable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_seek' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_explain_recover' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export_force' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_feature_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev_by_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_free_handles' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_all_vdev_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_errlog' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_features' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_load_policy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state_str' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_vdev_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_vdev_prop_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_history_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_in_use' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_is_draid_spare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_load_compat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_log_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path_ds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_canfail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_pool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_print_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_unsupported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_read_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_search_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_vdev_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_skip_pool' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_sync_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_upgrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_attach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_degrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_detach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_fault' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_indirect_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_offline' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_online' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_path_to_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove_cancel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_split' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_free_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_get_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter_common' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_print_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_impl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_number' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_width' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zvol_volsize_to_reservation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='efi_debug' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_abd_ops' size='24' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512bw_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512f_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_sse2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_ssse3_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar4_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='2072' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
<abi-instr address-size='64' path='lib/libefi/rdwr_efi.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='288' id='16e6f2c6'>
<subrange length='36' type-id='7359adad' id='ae666bde'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a65ae39c' size-in-bits='960' id='fa198beb'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='48' id='0f562bd0'>
<subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
<array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='384' id='dba89ba3'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='256' id='01d84ed4'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='a65ae39c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='p_start' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='p_size' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='p_guid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='p_tag' type-id='d908a348' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='272'>
<var-decl name='p_flag' type-id='d908a348' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='p_name' type-id='16e6f2c6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='p_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='p_resv' type-id='01d84ed4' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='dd4a2e5a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='efi_version' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='efi_nparts' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='efi_part_size' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='efi_lbasize' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='efi_last_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='efi_first_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='efi_last_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='efi_disk_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='efi_flags' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='efi_reserved1' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='efi_altern_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='efi_reserved' type-id='dba89ba3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='efi_parts' type-id='fa198beb' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='214f32ea'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='time_low' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='time_mid' type-id='149c6638' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
<var-decl name='time_hi_and_version' type-id='149c6638' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='clock_seq_hi_and_reserved' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='72'>
<var-decl name='clock_seq_low' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='80'>
<var-decl name='node_addr' type-id='0f562bd0' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ushort_t' type-id='8efea9e5' id='d908a348'/>
<pointer-type-def type-id='dd4a2e5a' size-in-bits='64' id='0d8119a8'/>
<pointer-type-def type-id='0d8119a8' size-in-bits='64' id='c43b27a6'/>
<var-decl name='efi_debug' type-id='95e97e5e' mangled-name='efi_debug' visibility='default' elf-symbol-id='efi_debug'/>
<function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_init'>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='8f92235e' name='nparts'/>
<parameter type-id='c43b27a6' name='vtoc'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_read'>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='c43b27a6' name='vtoc'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_rescan'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_use_whole_disk'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_write'>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='0d8119a8' name='vtoc'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_free'>
<parameter type-id='0d8119a8' name='ptr'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='efi_err_check' mangled-name='efi_err_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_err_check'>
<parameter type-id='0d8119a8' name='vtoc'/>
<return type-id='48b5725f'/>
</function-decl>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
<type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/libshare.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='128' id='2d6895a3'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<var-decl name='sa_protocol_names' type-id='2d6895a3' mangled-name='sa_protocol_names' visibility='default' elf-symbol-id='sa_protocol_names'/>
<function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_enable_share'>
<parameter type-id='80f4b756' name='zfsname'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='80f4b756' name='shareopts'/>
<parameter type-id='9155d4b5' name='protocol'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_disable_share'>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='9155d4b5' name='protocol'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='9155d4b5' name='protocol'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_commit_shares'>
<parameter type-id='9155d4b5' name='protocol'/>
<return type-id='48b5725f'/>
</function-decl>
+ <function-decl name='sa_truncate_shares' mangled-name='sa_truncate_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_truncate_shares'>
+ <parameter type-id='9155d4b5' name='protocol'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_validate_shareopts'>
<parameter type-id='80f4b756' name='options'/>
<parameter type-id='9155d4b5' name='protocol'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_errorstr'>
<parameter type-id='95e97e5e' name='err'/>
<return type-id='80f4b756'/>
</function-decl>
<enum-decl name='sa_protocol' id='9155d4b5'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SA_PROTOCOL_NFS' value='0'/>
<enumerator name='SA_PROTOCOL_SMB' value='1'/>
<enumerator name='SA_PROTOCOL_COUNT' value='2'/>
</enum-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/os/linux/nfs.c' language='LANG_C99'>
<class-decl name='sa_share_impl' size-in-bits='192' is-struct='yes' visibility='default' id='72b09bf8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='sa_zfsname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='sa_mountpoint' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='sa_shareopts' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_share_impl_t' type-id='946a2c6b' id='a48b47d0'/>
- <class-decl name='sa_fstype_t' size-in-bits='320' is-struct='yes' naming-typedef-id='639af739' visibility='default' id='944afa86'>
+ <class-decl name='sa_fstype_t' size-in-bits='384' is-struct='yes' naming-typedef-id='639af739' visibility='default' id='944afa86'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='enable_share' type-id='2f78a9c1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='disable_share' type-id='2f78a9c1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='is_shared' type-id='81020bc2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='validate_shareopts' type-id='f194a8fb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='commit_shares' type-id='797ee7da' visibility='default'/>
</data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='truncate_shares' type-id='5d51038b' visibility='default'/>
+ </data-member>
</class-decl>
<typedef-decl name='sa_fstype_t' type-id='944afa86' id='639af739'/>
<qualified-type-def type-id='639af739' const='yes' id='d19dbca9'/>
<qualified-type-def type-id='72b09bf8' const='yes' id='484950e3'/>
<pointer-type-def type-id='484950e3' size-in-bits='64' id='946a2c6b'/>
<pointer-type-def type-id='276427e1' size-in-bits='64' id='1db260e5'/>
<qualified-type-def type-id='1db260e5' const='yes' id='797ee7da'/>
<pointer-type-def type-id='5113b296' size-in-bits='64' id='70487b28'/>
<qualified-type-def type-id='70487b28' const='yes' id='f194a8fb'/>
<pointer-type-def type-id='c13578bc' size-in-bits='64' id='fa1f29ce'/>
<qualified-type-def type-id='fa1f29ce' const='yes' id='2f78a9c1'/>
<pointer-type-def type-id='86373eb1' size-in-bits='64' id='f337456d'/>
<qualified-type-def type-id='f337456d' const='yes' id='81020bc2'/>
+ <pointer-type-def type-id='ee076206' size-in-bits='64' id='953b12f8'/>
+ <qualified-type-def type-id='953b12f8' const='yes' id='5d51038b'/>
<var-decl name='libshare_nfs_type' type-id='d19dbca9' visibility='default'/>
<function-type size-in-bits='64' id='276427e1'>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='5113b296'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='c13578bc'>
<parameter type-id='a48b47d0'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='86373eb1'>
<parameter type-id='a48b47d0'/>
<return type-id='c19b74c3'/>
</function-type>
+ <function-type size-in-bits='64' id='ee076206'>
+ <return type-id='48b5725f'/>
+ </function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/os/linux/smb.c' language='LANG_C99'>
<var-decl name='libshare_smb_type' type-id='d19dbca9' visibility='default'/>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/assert.c' language='LANG_C99'>
<function-decl name='libspl_set_assert_ok' mangled-name='libspl_set_assert_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_set_assert_ok'>
<parameter type-id='c19b74c3' name='val'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
<parameter type-id='80f4b756' name='file'/>
<parameter type-id='80f4b756' name='func'/>
<parameter type-id='95e97e5e' name='line'/>
<parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/atomic.c' language='LANG_C99'>
<type-decl name='signed char' size-in-bits='8' id='28577a57'/>
<type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
<typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
<typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
<typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
<typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
<typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
<typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
<qualified-type-def type-id='149c6638' volatile='yes' id='5120c5f7'/>
<pointer-type-def type-id='5120c5f7' size-in-bits='64' id='93977ae7'/>
<qualified-type-def type-id='8f92235e' volatile='yes' id='430e0681'/>
<pointer-type-def type-id='430e0681' size-in-bits='64' id='3a147f31'/>
<qualified-type-def type-id='b96825af' volatile='yes' id='84ff7d66'/>
<pointer-type-def type-id='84ff7d66' size-in-bits='64' id='aa323ea4'/>
<qualified-type-def type-id='ee1f298e' volatile='yes' id='6f7e09cb'/>
<pointer-type-def type-id='6f7e09cb' size-in-bits='64' id='64698d33'/>
<qualified-type-def type-id='48b5725f' volatile='yes' id='b0b3cbf9'/>
<pointer-type-def type-id='b0b3cbf9' size-in-bits='64' id='fe09dd29'/>
<function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
<parameter type-id='64698d33' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
<parameter type-id='64698d33' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='eaa32e2f' name='exp'/>
<parameter type-id='eaa32e2f' name='des'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='exp'/>
<parameter type-id='b96825af' name='des'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='exp'/>
<parameter type-id='149c6638' name='des'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='exp'/>
<parameter type-id='8f92235e' name='des'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='exp'/>
<parameter type-id='ee1f298e' name='des'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='eaa32e2f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='3502e3ff' name='value'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='3502e3ff' name='value'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/getexecname.c' language='LANG_C99'>
<function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
<return type-id='80f4b756'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/list.c' language='LANG_C99'>
<typedef-decl name='list_node_t' type-id='b0b5e45e' id='b21843b2'/>
<typedef-decl name='list_t' type-id='e824dae9' id='0899125f'/>
<class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='b0b5e45e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='next' type-id='b03eadb4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='prev' type-id='b03eadb4' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='e824dae9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='list_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='list_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='list_head' type-id='b0b5e45e' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='b0b5e45e' size-in-bits='64' id='b03eadb4'/>
<pointer-type-def type-id='b21843b2' size-in-bits='64' id='ccc38265'/>
<pointer-type-def type-id='0899125f' size-in-bits='64' id='352ec160'/>
<function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='b59d7dce' name='offset'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
<parameter type-id='352ec160' name='list'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<parameter type-id='eaa32e2f' name='nobject'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<parameter type-id='eaa32e2f' name='nobject'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
<parameter type-id='352ec160' name='dst'/>
<parameter type-id='352ec160' name='src'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
<parameter type-id='ccc38265' name='lold'/>
<parameter type-id='ccc38265' name='lnew'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
<parameter type-id='ccc38265' name='ln'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
<parameter type-id='ccc38265' name='ln'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
<parameter type-id='352ec160' name='list'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/mkdirp.c' language='LANG_C99'>
<typedef-decl name='mode_t' type-id='e1c52942' id='d50d396c'/>
<function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
<parameter type-id='80f4b756' name='d'/>
<parameter type-id='d50d396c' name='mode'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/gethostid.c' language='LANG_C99'>
<function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
<return type-id='7359adad'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/getmntany.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
<subrange length='20' type-id='7359adad' id='fdca39cf'/>
</array-type-def>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='0c544dc0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='mnt_major' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='0bbec9cd'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='st_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='st_blocks' type-id='4e711bf1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__dev_t' type-id='7359adad' id='35ed8932'/>
<typedef-decl name='__gid_t' type-id='f0981eeb' id='d94ec6d9'/>
<typedef-decl name='__ino64_t' type-id='7359adad' id='71288a47'/>
<typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
<typedef-decl name='__nlink_t' type-id='7359adad' id='80f0b9df'/>
<typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
<typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
<typedef-decl name='__time_t' type-id='bd54fe1a' id='65eda9c0'/>
<typedef-decl name='__blksize_t' type-id='bd54fe1a' id='d3f10a7f'/>
<typedef-decl name='__blkcnt64_t' type-id='bd54fe1a' id='4e711bf1'/>
<typedef-decl name='__syscall_slong_t' type-id='bd54fe1a' id='03085adc'/>
<typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
<typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
<class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
<var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
<var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
<var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
<var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='_codecvt' type-id='570f8c59' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
<var-decl name='_wide_data' type-id='c65a1f29' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
<var-decl name='_freeres_list' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
<var-decl name='_freeres_buf' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
<var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
<var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
<var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='a9c79a1f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tv_sec' type-id='65eda9c0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tv_nsec' type-id='03085adc' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
<pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
<pointer-type-def type-id='a4036571' size-in-bits='64' id='570f8c59'/>
<pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
<pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
<pointer-type-def type-id='79bd3751' size-in-bits='64' id='c65a1f29'/>
<pointer-type-def type-id='0c544dc0' size-in-bits='64' id='394fc496'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
<parameter type-id='822cd80b' name='fp'/>
<parameter type-id='9d424d31' name='mgetp'/>
<parameter type-id='9d424d31' name='mrefp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
<parameter type-id='822cd80b' name='fp'/>
<parameter type-id='9d424d31' name='mgetp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='394fc496' name='entry'/>
<parameter type-id='62f7a03d' name='statbuf'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/zone.c' language='LANG_C99'>
<typedef-decl name='zoneid_t' type-id='3502e3ff' id='4da03624'/>
<function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
<return type-id='4da03624'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/page.c' language='LANG_C99'>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/strlcat.c' language='LANG_C99'>
<function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
<parameter type-id='26a90f95' name='dst'/>
<parameter type-id='80f4b756' name='src'/>
<parameter type-id='b59d7dce' name='dstsize'/>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/strlcpy.c' language='LANG_C99'>
<function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
<parameter type-id='26a90f95' name='dst'/>
<parameter type-id='80f4b756' name='src'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/timestamp.c' language='LANG_C99'>
<function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libtpool/thread_pool.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='384' id='36d7f119'>
<subrange length='48' type-id='7359adad' id='8f6d2a81'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='448' id='6093ff7c'>
<subrange length='56' type-id='7359adad' id='f8137894'/>
</array-type-def>
<type-decl name='long long unsigned int' size-in-bits='64' id='3a47d82b'/>
<array-type-def dimensions='1' type-id='f0981eeb' size-in-bits='64' id='0d532ec1'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<typedef-decl name='tpool_t' type-id='88d1b7f9' id='b1bbf10d'/>
<typedef-decl name='pthread_t' type-id='7359adad' id='4051f5e7'/>
<union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='b63afacd'>
<data-member access='public'>
<var-decl name='__size' type-id='6093ff7c' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_attr_t' type-id='b63afacd' id='7d8569fd'/>
<union-decl name='pthread_cond_t' size-in-bits='384' naming-typedef-id='62fab762' visibility='default' id='cbb12c12'>
<data-member access='public'>
<var-decl name='__data' type-id='c987b47c' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__size' type-id='36d7f119' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='1eb56b1e' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_cond_t' type-id='cbb12c12' id='62fab762'/>
<class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='c987b47c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='' type-id='ac5ab595' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='' type-id='ac5ab596' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='__g_refs' type-id='0d532ec1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='__g_size' type-id='0d532ec1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='__g1_orig_size' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__wrefs' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='__g_signals' type-id='0d532ec1' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__1' size-in-bits='64' is-anonymous='yes' visibility='default' id='ac5ab595'>
<data-member access='public'>
<var-decl name='__wseq' type-id='3a47d82b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__wseq32' type-id='e7f43f72' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f72'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__low' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='__high' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__2' size-in-bits='64' is-anonymous='yes' visibility='default' id='ac5ab596'>
<data-member access='public'>
<var-decl name='__g1_start' type-id='3a47d82b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__g1_start32' type-id='e7f43f72' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='tpool_job_t' type-id='3b8579e5' id='66a0afc9'/>
<class-decl name='tpool_job' size-in-bits='192' is-struct='yes' visibility='default' id='3b8579e5'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tpj_next' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tpj_func' type-id='b7f9d8e6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tpj_arg' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='tpool_active_t' type-id='c8d086f4' id='6fcda10e'/>
<class-decl name='tpool_active' size-in-bits='128' is-struct='yes' visibility='default' id='c8d086f4'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tpa_next' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tpa_tid' type-id='4051f5e7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='88d1b7f9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tp_forw' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tp_back' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tp_mutex' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='tp_busycv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='tp_workcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='tp_waitcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1600'>
<var-decl name='tp_active' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1664'>
<var-decl name='tp_head' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1728'>
<var-decl name='tp_tail' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1792'>
<var-decl name='tp_attr' type-id='7d8569fd' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='tp_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2272'>
<var-decl name='tp_linger' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='tp_njobs' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2336'>
<var-decl name='tp_minimum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='tp_maximum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2400'>
<var-decl name='tp_current' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='tp_idle' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='7d8569fd' size-in-bits='64' id='7347a39e'/>
<pointer-type-def type-id='6fcda10e' size-in-bits='64' id='ad33e5e7'/>
<pointer-type-def type-id='66a0afc9' size-in-bits='64' id='f32b30e4'/>
<pointer-type-def type-id='b1bbf10d' size-in-bits='64' id='9cf59a50'/>
<pointer-type-def type-id='c5c76c9c' size-in-bits='64' id='b7f9d8e6'/>
<function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_create'>
<parameter type-id='3502e3ff' name='min_threads'/>
<parameter type-id='3502e3ff' name='max_threads'/>
<parameter type-id='3502e3ff' name='linger'/>
<parameter type-id='7347a39e' name='attr'/>
<return type-id='9cf59a50'/>
</function-decl>
<function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_dispatch'>
<parameter type-id='9cf59a50' name='tpool'/>
<parameter type-id='b7f9d8e6' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_destroy'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_abandon' mangled-name='tpool_abandon' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_abandon'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_wait'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_suspend' mangled-name='tpool_suspend' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspend'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_suspended' mangled-name='tpool_suspended' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspended'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_resume' mangled-name='tpool_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_resume'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_member' mangled-name='tpool_member' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_member'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='c5c76c9c'>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_changelist.c' language='LANG_C99'>
<type-decl name='void' id='48b5725f'/>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_config.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<type-decl name='char' size-in-bits='8' id='a84c031d'/>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8192' id='b54ce520'>
<subrange length='1024' type-id='7359adad' id='c60446f8'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2048' id='d1617432'>
<subrange length='256' type-id='7359adad' id='36e5b9fa'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
<subrange length='40' type-id='7359adad' id='8f80b239'/>
</array-type-def>
<class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='b48d2441'/>
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<type-decl name='int' size-in-bits='32' id='95e97e5e'/>
<type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
<type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<type-decl name='short int' size-in-bits='16' id='a2185560'/>
<type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
<type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
<type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
<typedef-decl name='uu_avl_pool_t' type-id='12a530a8' id='7f84e390'/>
<typedef-decl name='uu_avl_t' type-id='4af029d1' id='bb7f0973'/>
<typedef-decl name='zfs_handle_t' type-id='f6ee4445' id='775509eb'/>
<typedef-decl name='zpool_handle_t' type-id='67002a8a' id='b1efc708'/>
<typedef-decl name='libzfs_handle_t' type-id='c8a9d9d8' id='95942d0c'/>
<typedef-decl name='zpool_iter_f' type-id='3aebb66f' id='fa476e62'/>
<typedef-decl name='zfs_iter_f' type-id='5571cde4' id='d8e49ab9'/>
<typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
<class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_root' type-id='bf311473' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='098f0221'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dds_num_clones' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='dds_creation_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='dds_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='dds_type' type-id='230f1e16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='dds_is_snapshot' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='232'>
<var-decl name='dds_inconsistent' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='240'>
<var-decl name='dds_redacted' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='248'>
<var-decl name='dds_origin' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='dmu_objset_stats_t' type-id='098f0221' id='b2c14f17'/>
<enum-decl name='zfs_type_t' naming-typedef-id='2e45de5d' id='5d8f7321'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_TYPE_INVALID' value='0'/>
<enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
<enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_TYPE_VOLUME' value='4'/>
<enumerator name='ZFS_TYPE_POOL' value='8'/>
<enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
<enumerator name='ZFS_TYPE_VDEV' value='32'/>
</enum-decl>
<typedef-decl name='zfs_type_t' type-id='5d8f7321' id='2e45de5d'/>
<enum-decl name='dmu_objset_type' id='6b1b19f9'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DMU_OST_NONE' value='0'/>
<enumerator name='DMU_OST_META' value='1'/>
<enumerator name='DMU_OST_ZFS' value='2'/>
<enumerator name='DMU_OST_ZVOL' value='3'/>
<enumerator name='DMU_OST_OTHER' value='4'/>
<enumerator name='DMU_OST_ANY' value='5'/>
<enumerator name='DMU_OST_NUMTYPES' value='6'/>
</enum-decl>
<typedef-decl name='dmu_objset_type_t' type-id='6b1b19f9' id='230f1e16'/>
<class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
<enum-decl name='boolean_t' naming-typedef-id='c19b74c3' id='f58c8277'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
<typedef-decl name='boolean_t' type-id='f58c8277' id='c19b74c3'/>
<typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
<typedef-decl name='longlong_t' type-id='1eb56b1e' id='9b3ff54f'/>
<typedef-decl name='diskaddr_t' type-id='9b3ff54f' id='804dc465'/>
<typedef-decl name='__re_long_size_t' type-id='7359adad' id='ba516949'/>
<typedef-decl name='reg_syntax_t' type-id='7359adad' id='1b72c3b3'/>
<class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='19fc9a8c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='buffer' type-id='33976309' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='allocated' type-id='ba516949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='used' type-id='ba516949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='syntax' type-id='1b72c3b3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fastmap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='translate' type-id='cf536864' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='re_nsub' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='can_be_null' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='449'>
<var-decl name='regs_allocated' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='451'>
<var-decl name='fastmap_accurate' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='452'>
<var-decl name='no_sub' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='453'>
<var-decl name='not_bol' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='454'>
<var-decl name='not_eol' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='455'>
<var-decl name='newline_anchor' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='regex_t' type-id='19fc9a8c' id='aca3bac8'/>
<typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
<union-decl name='pthread_mutex_t' size-in-bits='320' naming-typedef-id='7a6844eb' visibility='default' id='70681f9b'>
<data-member access='public'>
<var-decl name='__data' type-id='4c734837' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__size' type-id='36c46961' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_mutex_t' type-id='70681f9b' id='7a6844eb'/>
<typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
<typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
<typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
<typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
<class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='__spins' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
<var-decl name='__elision' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
<typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
<typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
<typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
<typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
<typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
<class-decl name='libzfs_handle' size-in-bits='18240' is-struct='yes' visibility='default' id='c8a9d9d8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='libzfs_error' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='libzfs_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='libzfs_pool_handles' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='libzfs_ns_avlpool' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='libzfs_ns_avl' type-id='a5c21a38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='libzfs_ns_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='libzfs_desc_active' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='libzfs_action' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8544'>
<var-decl name='libzfs_desc' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16736'>
<var-decl name='libzfs_printerr' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16768'>
<var-decl name='libzfs_mnttab_enable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16832'>
<var-decl name='libzfs_mnttab_cache_lock' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17152'>
<var-decl name='libzfs_mnttab_cache' type-id='f20fbd51' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17472'>
<var-decl name='libzfs_pool_iter' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17504'>
<var-decl name='libzfs_prop_debug' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17536'>
<var-decl name='libzfs_urire' type-id='aca3bac8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18048'>
<var-decl name='libzfs_max_nvlist' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18112'>
<var-decl name='libfetch' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18176'>
<var-decl name='libfetch_load_error' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='f6ee4445'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zfs_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zpool_hdl' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zfs_name' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zfs_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2208'>
<var-decl name='zfs_head_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zfs_dmustats' type-id='b2c14f17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4544'>
<var-decl name='zfs_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4608'>
<var-decl name='zfs_user_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4672'>
<var-decl name='zfs_recvd_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4736'>
<var-decl name='zfs_mntcheck' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4800'>
<var-decl name='zfs_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4864'>
<var-decl name='zfs_props_table' type-id='ae3e8ca6' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='67002a8a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zpool_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zpool_next' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zpool_name' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zpool_state' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zpool_config_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='zpool_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='zpool_old_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='zpool_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2496'>
<var-decl name='zpool_start_block' type-id='804dc465' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
<pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
<pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
<pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
<qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
<pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
<pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
<pointer-type-def type-id='cb9628fa' size-in-bits='64' id='5571cde4'/>
<pointer-type-def type-id='2bce87e3' size-in-bits='64' id='3aebb66f'/>
<pointer-type-def type-id='95942d0c' size-in-bits='64' id='b0382bb3'/>
<pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
<pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
<pointer-type-def type-id='b48d2441' size-in-bits='64' id='33976309'/>
<pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
<pointer-type-def type-id='002ac4a6' size-in-bits='64' id='cf536864'/>
<pointer-type-def type-id='7f84e390' size-in-bits='64' id='de82c773'/>
<pointer-type-def type-id='bb7f0973' size-in-bits='64' id='a5c21a38'/>
<pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
<pointer-type-def type-id='775509eb' size-in-bits='64' id='9200a744'/>
<pointer-type-def type-id='b1efc708' size-in-bits='64' id='4c81de99'/>
<class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='b48d2441'/>
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='oldconfig'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='37e3bd22' name='missing'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
<parameter type-id='80f4b756' name='poolname'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='fa476e62' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='96ee24a5'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='cb9628fa'>
<parameter type-id='9200a744'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='2bce87e3'>
<parameter type-id='4c81de99'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_crypto.c' language='LANG_C99'>
<typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
<pointer-type-def type-id='ae3e8ca6' size-in-bits='64' id='d8774064'/>
<pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
<function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='37e3bd22' name='is_encroot'/>
<parameter type-id='26a90f95' name='buf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='parent_name'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='5ce45b60' name='pool_props'/>
<parameter type-id='c19b74c3' name='stdin_available'/>
<parameter type-id='d8774064' name='wkeydata_out'/>
<parameter type-id='4dd26a40' name='wkeylen_out'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='9200a744' name='origin_zhp'/>
<parameter type-id='26a90f95' name='parent_name'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='noop'/>
<parameter type-id='80f4b756' name='alt_keylocation'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='raw_props'/>
<parameter type-id='c19b74c3' name='inheritkey'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_dataset.c' language='LANG_C99'>
<class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='bd9b4291'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pl_prop' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pl_user_prop' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pl_next' type-id='9f1a1109' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pl_all' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pl_width' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pl_recvd_width' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='pl_fixed' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_list_t' type-id='bd9b4291' id='bdb8ac4f'/>
<class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='7aee5792'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='recursive' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1'>
<var-decl name='nounmount' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2'>
<var-decl name='forceunmount' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='renameflags_t' type-id='7aee5792' id='067170c2'/>
<typedef-decl name='zfs_userspace_cb_t' type-id='ca64ff60' id='16c5f410'/>
<enum-decl name='zfs_prop_t' naming-typedef-id='58603c44' id='4b000d60'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_CONT' value='-2'/>
<enumerator name='ZPROP_INVAL' value='-1'/>
+ <enumerator name='ZPROP_USERPROP' value='-1'/>
<enumerator name='ZFS_PROP_TYPE' value='0'/>
<enumerator name='ZFS_PROP_CREATION' value='1'/>
<enumerator name='ZFS_PROP_USED' value='2'/>
<enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
<enumerator name='ZFS_PROP_REFERENCED' value='4'/>
<enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
<enumerator name='ZFS_PROP_MOUNTED' value='6'/>
<enumerator name='ZFS_PROP_ORIGIN' value='7'/>
<enumerator name='ZFS_PROP_QUOTA' value='8'/>
<enumerator name='ZFS_PROP_RESERVATION' value='9'/>
<enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
<enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
<enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
<enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
<enumerator name='ZFS_PROP_SHARENFS' value='14'/>
<enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
<enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
<enumerator name='ZFS_PROP_ATIME' value='17'/>
<enumerator name='ZFS_PROP_DEVICES' value='18'/>
<enumerator name='ZFS_PROP_EXEC' value='19'/>
<enumerator name='ZFS_PROP_SETUID' value='20'/>
<enumerator name='ZFS_PROP_READONLY' value='21'/>
<enumerator name='ZFS_PROP_ZONED' value='22'/>
<enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
<enumerator name='ZFS_PROP_ACLMODE' value='24'/>
<enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
<enumerator name='ZFS_PROP_CREATETXG' value='26'/>
<enumerator name='ZFS_PROP_NAME' value='27'/>
<enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
<enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
<enumerator name='ZFS_PROP_XATTR' value='30'/>
<enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
<enumerator name='ZFS_PROP_COPIES' value='32'/>
<enumerator name='ZFS_PROP_VERSION' value='33'/>
<enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
<enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
<enumerator name='ZFS_PROP_CASE' value='36'/>
<enumerator name='ZFS_PROP_VSCAN' value='37'/>
<enumerator name='ZFS_PROP_NBMAND' value='38'/>
<enumerator name='ZFS_PROP_SHARESMB' value='39'/>
<enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
<enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
<enumerator name='ZFS_PROP_GUID' value='42'/>
<enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
<enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
<enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
<enumerator name='ZFS_PROP_USEDDS' value='46'/>
<enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
<enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
<enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
<enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
<enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
<enumerator name='ZFS_PROP_USERREFS' value='52'/>
<enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
<enumerator name='ZFS_PROP_UNIQUE' value='54'/>
<enumerator name='ZFS_PROP_OBJSETID' value='55'/>
<enumerator name='ZFS_PROP_DEDUP' value='56'/>
<enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
<enumerator name='ZFS_PROP_SYNC' value='58'/>
<enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
<enumerator name='ZFS_PROP_REFRATIO' value='60'/>
<enumerator name='ZFS_PROP_WRITTEN' value='61'/>
<enumerator name='ZFS_PROP_CLONES' value='62'/>
<enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
<enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
<enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
<enumerator name='ZFS_PROP_VOLMODE' value='66'/>
<enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
<enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
<enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
<enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
<enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
<enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
<enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
<enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
<enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
<enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
<enumerator name='ZFS_PROP_RELATIME' value='77'/>
<enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
<enumerator name='ZFS_PROP_OVERLAY' value='79'/>
<enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
<enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
<enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
<enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
<enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
<enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
<enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
<enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
<enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
<enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
<enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
<enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
<enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
<enumerator name='ZFS_PROP_REDACTED' value='93'/>
<enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
<enumerator name='ZFS_PROP_SNAPSHOTS_CHANGED' value='95'/>
<enumerator name='ZFS_NUM_PROPS' value='96'/>
</enum-decl>
<typedef-decl name='zfs_prop_t' type-id='4b000d60' id='58603c44'/>
<enum-decl name='zfs_userquota_prop_t' naming-typedef-id='279fde6a' id='5258d2f6'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_PROP_USERUSED' value='0'/>
<enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
<enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
<enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
<enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
<enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
<enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
<enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
<enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
<enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
<enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
<enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
<enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
</enum-decl>
<typedef-decl name='zfs_userquota_prop_t' type-id='5258d2f6' id='279fde6a'/>
<enum-decl name='zprop_source_t' naming-typedef-id='a2256d42' id='5903f80e'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_SRC_NONE' value='1'/>
<enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
<enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
<enumerator name='ZPROP_SRC_LOCAL' value='8'/>
<enumerator name='ZPROP_SRC_INHERITED' value='16'/>
<enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
</enum-decl>
<typedef-decl name='zprop_source_t' type-id='5903f80e' id='a2256d42'/>
<enum-decl name='zfs_wait_activity_t' naming-typedef-id='3024501a' id='527d5dc6'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
<enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
</enum-decl>
<typedef-decl name='zfs_wait_activity_t' type-id='527d5dc6' id='3024501a'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
<typedef-decl name='uid_t' type-id='cc5fcceb' id='354978ed'/>
<pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
<qualified-type-def type-id='775509eb' const='yes' id='5eadf2db'/>
<pointer-type-def type-id='5eadf2db' size-in-bits='64' id='fcd57163'/>
<pointer-type-def type-id='7e291ce6' size-in-bits='64' id='ca64ff60'/>
<pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
<pointer-type-def type-id='bd9b4291' size-in-bits='64' id='9f1a1109'/>
<pointer-type-def type-id='bdb8ac4f' size-in-bits='64' id='3a9b2288'/>
<pointer-type-def type-id='3a9b2288' size-in-bits='64' id='e4378506'/>
<pointer-type-def type-id='a2256d42' size-in-bits='64' id='debc6aa3'/>
<function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
<parameter type-id='9200a744' name='zhp_orig'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_open'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='95e97e5e' name='types'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_close'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='c19b74c3' name='enable'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_find'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<parameter type-id='9d424d31' name='entry'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='special'/>
<parameter type-id='80f4b756' name='mountp'/>
<parameter type-id='80f4b756' name='mntopts'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='7292109c' name='spa_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='5ce45b60' name='nvl'/>
<parameter type-id='9c313c2d' name='zoned'/>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='4c81de99' name='zpool_hdl'/>
<parameter type-id='c19b74c3' name='key_params_ok'/>
<parameter type-id='80f4b756' name='errbuf'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='c19b74c3' name='received'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='9b23c9ad' name='source'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='b59d7dce' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='b59d7dce' name='proplen'/>
<parameter type-id='debc6aa3' name='src'/>
<parameter type-id='26a90f95' name='statbuf'/>
<parameter type-id='b59d7dce' name='statlen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='5d6479ae' name='value'/>
<parameter type-id='debc6aa3' name='src'/>
<parameter type-id='26a90f95' name='statbuf'/>
<parameter type-id='b59d7dce' name='statlen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='5d6479ae' name='propvalue'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='95e97e5e' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='5d6479ae' name='propvalue'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='95e97e5e' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='2e45de5d'/>
</function-decl>
<function-decl name='zfs_get_underlying_type' mangled-name='zfs_get_underlying_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_type'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='2e45de5d'/>
</function-decl>
<function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='types'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='26a90f95' name='snapname'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='c19b74c3' name='recursive'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9200a744' name='snap'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='067170c2' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='e4378506' name='plp'/>
<parameter type-id='c19b74c3' name='received'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='ae3e8ca6' name='props'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='resource'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='resource'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='oldname'/>
<parameter type-id='26a90f95' name='newname'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='279fde6a' name='type'/>
<parameter type-id='16c5f410' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='snapname'/>
<parameter type-id='80f4b756' name='tag'/>
<parameter type-id='c19b74c3' name='recursive'/>
<parameter type-id='95e97e5e' name='cleanup_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='cleanup_fd'/>
<parameter type-id='5ce45b60' name='holds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='snapname'/>
<parameter type-id='80f4b756' name='tag'/>
<parameter type-id='c19b74c3' name='recursive'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='857bb57e' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='un'/>
<parameter type-id='5ce45b60' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='857bb57e' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
<parameter type-id='4c81de99' name='zph'/>
<parameter type-id='9c313c2d' name='volsize'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='3024501a' name='activity'/>
<parameter type-id='37e3bd22' name='missing'/>
<parameter type-id='37e3bd22' name='waited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='7e291ce6'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='80f4b756'/>
<parameter type-id='354978ed'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_diff.c' language='LANG_C99'>
<function-decl name='zfs_show_diffs' mangled-name='zfs_show_diffs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_show_diffs'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='fromsnap'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_import.c' language='LANG_C99'>
<typedef-decl name='refresh_config_func_t' type-id='29f040d2' id='b7c58eaa'/>
<typedef-decl name='pool_active_func_t' type-id='baa42fef' id='de5d1d8f'/>
<class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='8b092c69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pco_refresh_config' type-id='e7c00489' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pco_pool_active' type-id='9eadf5e0' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='pool_config_ops_t' type-id='1a21babe' id='b1e62775'/>
<enum-decl name='pool_state' id='4871ac24'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_STATE_ACTIVE' value='0'/>
<enumerator name='POOL_STATE_EXPORTED' value='1'/>
<enumerator name='POOL_STATE_DESTROYED' value='2'/>
<enumerator name='POOL_STATE_SPARE' value='3'/>
<enumerator name='POOL_STATE_L2CACHE' value='4'/>
<enumerator name='POOL_STATE_UNINITIALIZED' value='5'/>
<enumerator name='POOL_STATE_UNAVAIL' value='6'/>
<enumerator name='POOL_STATE_POTENTIALLY_ACTIVE' value='7'/>
</enum-decl>
<typedef-decl name='pool_state_t' type-id='4871ac24' id='084a08a3'/>
<qualified-type-def type-id='8b092c69' const='yes' id='1a21babe'/>
<pointer-type-def type-id='de5d1d8f' size-in-bits='64' id='9eadf5e0'/>
<pointer-type-def type-id='084a08a3' size-in-bits='64' id='b9ea57b8'/>
<pointer-type-def type-id='b7c58eaa' size-in-bits='64' id='e7c00489'/>
<var-decl name='libzfs_config_ops' type-id='b1e62775' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
<function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_in_use' mangled-name='zpool_in_use' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_in_use'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='b9ea57b8' name='state'/>
<parameter type-id='9b23c9ad' name='namestr'/>
<parameter type-id='37e3bd22' name='inuse'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='baa42fef'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='29f040d2'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='5ce45b60'/>
<return type-id='5ce45b60'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_iter.c' language='LANG_C99'>
<function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='simple'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='callback'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
<parameter type-id='9200a744' name='fs_zhp'/>
<parameter type-id='80f4b756' name='spec_orig'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='allowrecursion'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_mount.c' language='LANG_C99'>
<class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='803dac95'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_handles' type-id='4507922a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='cb_alloc' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='cb_used' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='get_all_cb_t' type-id='803dac95' id='9b293607'/>
<enum-decl name='sa_protocol' id='9155d4b5'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SA_PROTOCOL_NFS' value='0'/>
<enumerator name='SA_PROTOCOL_SMB' value='1'/>
<enumerator name='SA_PROTOCOL_COUNT' value='2'/>
</enum-decl>
<qualified-type-def type-id='9155d4b5' const='yes' id='9f2c1699'/>
<pointer-type-def type-id='9f2c1699' size-in-bits='64' id='4567bbc9'/>
<pointer-type-def type-id='9b293607' size-in-bits='64' id='77bf1784'/>
<pointer-type-def type-id='9200a744' size-in-bits='64' id='4507922a'/>
<function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
<parameter type-id='b0382bb3' name='zfs_hdl'/>
<parameter type-id='80f4b756' name='special'/>
<parameter type-id='9b23c9ad' name='where'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9b23c9ad' name='where'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='options'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='options'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9b23c9ad' name='where'/>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='48b5725f'/>
</function-decl>
+ <function-decl name='zfs_truncate_shares' mangled-name='zfs_truncate_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_truncate_shares'>
+ <parameter type-id='4567bbc9' name='proto'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
<parameter type-id='77bf1784' name='cbp'/>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4507922a' name='handles'/>
<parameter type-id='b59d7dce' name='num_handles'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='c19b74c3' name='parallel'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='mntopts'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_pool.c' language='LANG_C99'>
<class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='dc01bf52'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dryrun' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1'>
<var-decl name='import' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='name_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='splitflags_t' type-id='dc01bf52' id='325c1e34'/>
<class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='8ef58008'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fullpool' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='secure' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='wait' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='rate' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='trimflags_t' type-id='8ef58008' id='a093cbb8'/>
<enum-decl name='zpool_compat_status_t' naming-typedef-id='901b78d1' id='20676925'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
<enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
<enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
<enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
<enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
</enum-decl>
<typedef-decl name='zpool_compat_status_t' type-id='20676925' id='901b78d1'/>
<enum-decl name='zpool_prop_t' naming-typedef-id='5d0c23fb' id='af1ba157'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
<enumerator name='ZPOOL_PROP_NAME' value='0'/>
<enumerator name='ZPOOL_PROP_SIZE' value='1'/>
<enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
<enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
<enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
<enumerator name='ZPOOL_PROP_GUID' value='5'/>
<enumerator name='ZPOOL_PROP_VERSION' value='6'/>
<enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
<enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
<enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
<enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
<enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
<enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
<enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
<enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
<enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
<enumerator name='ZPOOL_PROP_FREE' value='16'/>
<enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
<enumerator name='ZPOOL_PROP_READONLY' value='18'/>
<enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
<enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
<enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
<enumerator name='ZPOOL_PROP_FREEING' value='22'/>
<enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
<enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
<enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
<enumerator name='ZPOOL_PROP_TNAME' value='26'/>
<enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
<enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
<enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
<enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
<enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
<enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
<enumerator name='ZPOOL_NUM_PROPS' value='33'/>
</enum-decl>
<typedef-decl name='zpool_prop_t' type-id='af1ba157' id='5d0c23fb'/>
<enum-decl name='vdev_prop_t' naming-typedef-id='5aa5c90c' id='1573bec8'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_PROP_INVAL' value='-1'/>
+ <enumerator name='VDEV_PROP_USERPROP' value='-1'/>
<enumerator name='VDEV_PROP_NAME' value='0'/>
<enumerator name='VDEV_PROP_CAPACITY' value='1'/>
<enumerator name='VDEV_PROP_STATE' value='2'/>
<enumerator name='VDEV_PROP_GUID' value='3'/>
<enumerator name='VDEV_PROP_ASIZE' value='4'/>
<enumerator name='VDEV_PROP_PSIZE' value='5'/>
<enumerator name='VDEV_PROP_ASHIFT' value='6'/>
<enumerator name='VDEV_PROP_SIZE' value='7'/>
<enumerator name='VDEV_PROP_FREE' value='8'/>
<enumerator name='VDEV_PROP_ALLOCATED' value='9'/>
<enumerator name='VDEV_PROP_COMMENT' value='10'/>
<enumerator name='VDEV_PROP_EXPANDSZ' value='11'/>
<enumerator name='VDEV_PROP_FRAGMENTATION' value='12'/>
<enumerator name='VDEV_PROP_BOOTSIZE' value='13'/>
<enumerator name='VDEV_PROP_PARITY' value='14'/>
<enumerator name='VDEV_PROP_PATH' value='15'/>
<enumerator name='VDEV_PROP_DEVID' value='16'/>
<enumerator name='VDEV_PROP_PHYS_PATH' value='17'/>
<enumerator name='VDEV_PROP_ENC_PATH' value='18'/>
<enumerator name='VDEV_PROP_FRU' value='19'/>
<enumerator name='VDEV_PROP_PARENT' value='20'/>
<enumerator name='VDEV_PROP_CHILDREN' value='21'/>
<enumerator name='VDEV_PROP_NUMCHILDREN' value='22'/>
<enumerator name='VDEV_PROP_READ_ERRORS' value='23'/>
<enumerator name='VDEV_PROP_WRITE_ERRORS' value='24'/>
<enumerator name='VDEV_PROP_CHECKSUM_ERRORS' value='25'/>
<enumerator name='VDEV_PROP_INITIALIZE_ERRORS' value='26'/>
<enumerator name='VDEV_PROP_OPS_NULL' value='27'/>
<enumerator name='VDEV_PROP_OPS_READ' value='28'/>
<enumerator name='VDEV_PROP_OPS_WRITE' value='29'/>
<enumerator name='VDEV_PROP_OPS_FREE' value='30'/>
<enumerator name='VDEV_PROP_OPS_CLAIM' value='31'/>
<enumerator name='VDEV_PROP_OPS_TRIM' value='32'/>
<enumerator name='VDEV_PROP_BYTES_NULL' value='33'/>
<enumerator name='VDEV_PROP_BYTES_READ' value='34'/>
<enumerator name='VDEV_PROP_BYTES_WRITE' value='35'/>
<enumerator name='VDEV_PROP_BYTES_FREE' value='36'/>
<enumerator name='VDEV_PROP_BYTES_CLAIM' value='37'/>
<enumerator name='VDEV_PROP_BYTES_TRIM' value='38'/>
<enumerator name='VDEV_PROP_REMOVING' value='39'/>
<enumerator name='VDEV_PROP_ALLOCATING' value='40'/>
<enumerator name='VDEV_NUM_PROPS' value='41'/>
</enum-decl>
<typedef-decl name='vdev_prop_t' type-id='1573bec8' id='5aa5c90c'/>
<enum-decl name='vdev_state' id='21566197'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
<enumerator name='VDEV_STATE_CLOSED' value='1'/>
<enumerator name='VDEV_STATE_OFFLINE' value='2'/>
<enumerator name='VDEV_STATE_REMOVED' value='3'/>
<enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
<enumerator name='VDEV_STATE_FAULTED' value='5'/>
<enumerator name='VDEV_STATE_DEGRADED' value='6'/>
<enumerator name='VDEV_STATE_HEALTHY' value='7'/>
</enum-decl>
<typedef-decl name='vdev_state_t' type-id='21566197' id='35acf840'/>
<enum-decl name='vdev_aux' id='7f5bcca4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_AUX_NONE' value='0'/>
<enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
<enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
<enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
<enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
<enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
<enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
<enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
<enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
<enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
<enumerator name='VDEV_AUX_SPARED' value='10'/>
<enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
<enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
<enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
<enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
<enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
<enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
<enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
<enumerator name='VDEV_AUX_ACTIVE' value='18'/>
<enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
<enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
</enum-decl>
<typedef-decl name='vdev_aux_t' type-id='7f5bcca4' id='9d774e0b'/>
<enum-decl name='pool_scan_func' id='1b092565'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCAN_NONE' value='0'/>
<enumerator name='POOL_SCAN_SCRUB' value='1'/>
<enumerator name='POOL_SCAN_RESILVER' value='2'/>
<enumerator name='POOL_SCAN_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_scan_func_t' type-id='1b092565' id='7313fbe2'/>
<enum-decl name='pool_scrub_cmd' id='a1474cbd'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCRUB_NORMAL' value='0'/>
<enumerator name='POOL_SCRUB_PAUSE' value='1'/>
<enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
</enum-decl>
<typedef-decl name='pool_scrub_cmd_t' type-id='a1474cbd' id='b51cf3c2'/>
<enum-decl name='pool_initialize_func' id='5c246ad4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_INITIALIZE_START' value='0'/>
<enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
<enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
<enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_initialize_func_t' type-id='5c246ad4' id='7063e1ab'/>
<enum-decl name='pool_trim_func' id='54ed608a'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_TRIM_START' value='0'/>
<enumerator name='POOL_TRIM_CANCEL' value='1'/>
<enumerator name='POOL_TRIM_SUSPEND' value='2'/>
<enumerator name='POOL_TRIM_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_trim_func_t' type-id='54ed608a' id='b1146b8d'/>
<enum-decl name='zpool_wait_activity_t' naming-typedef-id='73446457' id='849338e3'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
<enumerator name='ZPOOL_WAIT_FREE' value='1'/>
<enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
<enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
<enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
<enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
<enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
<enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
</enum-decl>
<typedef-decl name='zpool_wait_activity_t' type-id='849338e3' id='73446457'/>
<qualified-type-def type-id='8e8d4be3' const='yes' id='693c3853'/>
<pointer-type-def type-id='693c3853' size-in-bits='64' id='22cce67b'/>
<pointer-type-def type-id='a093cbb8' size-in-bits='64' id='b13f38c3'/>
<pointer-type-def type-id='35acf840' size-in-bits='64' id='17f3480d'/>
<function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='debc6aa3' name='src'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
<parameter type-id='35acf840' name='state'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
<parameter type-id='084a08a3' name='state'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<parameter type-id='debc6aa3' name='srctype'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='e4378506' name='plp'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vdev_expand_proplist' mangled-name='vdev_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_expand_proplist'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='e4378506' name='plp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='pool'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='pool'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='pool'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='5ce45b60' name='fsprops'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='95e97e5e' name='reason'/>
<parameter type-id='5ce45b60' name='config'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='80f4b756' name='newname'/>
<parameter type-id='26a90f95' name='altroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
<parameter type-id='5ce45b60' name='config'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='80f4b756' name='newname'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7063e1ab' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7063e1ab' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_trim' mangled-name='zpool_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_trim'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='b1146b8d' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<parameter type-id='b13f38c3' name='trim_flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7313fbe2' name='func'/>
<parameter type-id='b51cf3c2' name='cmd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='ppath'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='17f3480d' name='newstate'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='c19b74c3' name='istmp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='old_disk'/>
<parameter type-id='80f4b756' name='new_disk'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<parameter type-id='95e97e5e' name='replacing'/>
<parameter type-id='c19b74c3' name='rebuild'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='26a90f95' name='newname'/>
<parameter type-id='857bb57e' name='newroot'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='325c1e34' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5d6479ae' name='sizep'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5ce45b60' name='rewindnvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nv'/>
<parameter type-id='95e97e5e' name='name_flags'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nverrlistp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='new_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
<parameter type-id='95e97e5e' name='argc'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='26a90f95' name='string'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='message'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nvhisp'/>
<parameter type-id='5d6479ae' name='off'/>
<parameter type-id='37e3bd22' name='eof'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='857bb57e' name='nvp'/>
<parameter type-id='7292109c' name='dropped'/>
<parameter type-id='f0981eeb' name='flags'/>
<parameter type-id='95e97e5e' name='zevent_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='7292109c' name='count'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='9c313c2d' name='eid'/>
<parameter type-id='95e97e5e' name='zevent_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='dsobj'/>
<parameter type-id='9c313c2d' name='obj'/>
<parameter type-id='26a90f95' name='pathname'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='dsobj'/>
<parameter type-id='9c313c2d' name='obj'/>
<parameter type-id='26a90f95' name='pathname'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='73446457' name='activity'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='73446457' name='activity'/>
<parameter type-id='37e3bd22' name='missing'/>
<parameter type-id='37e3bd22' name='waited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='22cce67b' name='envmap'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nvlp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
<parameter type-id='80f4b756' name='compat'/>
<parameter type-id='37e3bd22' name='features'/>
<parameter type-id='26a90f95' name='report'/>
<parameter type-id='b59d7dce' name='rlen'/>
<return type-id='901b78d1'/>
</function-decl>
<function-decl name='zpool_get_vdev_prop_value' mangled-name='zpool_get_vdev_prop_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_vdev_prop_value'>
<parameter type-id='5ce45b60' name='nvprop'/>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='26a90f95' name='prop_name'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<parameter type-id='debc6aa3' name='srctype'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_vdev_prop' mangled-name='zpool_get_vdev_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_vdev_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='26a90f95' name='prop_name'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<parameter type-id='debc6aa3' name='srctype'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_all_vdev_props' mangled-name='zpool_get_all_vdev_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_all_vdev_props'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='857bb57e' name='outnvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_vdev_prop' mangled-name='zpool_set_vdev_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_vdev_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_sendrecv.c' language='LANG_C99'>
<class-decl name='sendflags' size-in-bits='544' is-struct='yes' visibility='default' id='f6aa15be'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='verbosity' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='replicate' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='skipmissing' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='doall' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fromorigin' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='pad' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='props' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='parsable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='progress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='largeblock' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='embed_data' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='compress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='raw' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='backup' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='saved' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sendflags_t' type-id='f6aa15be' id='945467e6'/>
<typedef-decl name='snapfilter_cb_t' type-id='d2a5e211' id='3d3ffb69'/>
- <class-decl name='recvflags' size-in-bits='416' is-struct='yes' visibility='default' id='34a384dc'>
+ <class-decl name='recvflags' size-in-bits='448' is-struct='yes' visibility='default' id='34a384dc'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='verbose' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='isprefix' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='istail' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='force' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='canmountoff' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='resumable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='byteswap' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='nomount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='skipholds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='domount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='forceunmount' type-id='c19b74c3' visibility='default'/>
</data-member>
+ <data-member access='public' layout-offset-in-bits='416'>
+ <var-decl name='heal' type-id='c19b74c3' visibility='default'/>
+ </data-member>
</class-decl>
<typedef-decl name='recvflags_t' type-id='34a384dc' id='9e59d1d4'/>
<pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
<pointer-type-def type-id='9e59d1d4' size-in-bits='64' id='4ea84b4f'/>
<pointer-type-def type-id='945467e6' size-in-bits='64' id='8def7735'/>
<pointer-type-def type-id='3d3ffb69' size-in-bits='64' id='72a26210'/>
<function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='5d6479ae' name='bytes_written'/>
<parameter type-id='5d6479ae' name='blocks_visited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='token'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='resume_token'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='resume_token'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='fromsnap'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='72a26210' name='filter_func'/>
<parameter type-id='eaa32e2f' name='cb_arg'/>
<parameter type-id='857bb57e' name='debugnvp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='from'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='80f4b756' name='redactbook'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_receive' mangled-name='zfs_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_receive'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='4ea84b4f' name='flags'/>
<parameter type-id='95e97e5e' name='infd'/>
<parameter type-id='a3681dea' name='stream_avl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='d2a5e211'>
<parameter type-id='9200a744'/>
<parameter type-id='eaa32e2f'/>
<return type-id='c19b74c3'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_status.c' language='LANG_C99'>
<enum-decl name='zpool_status_t' naming-typedef-id='d3dd6294' id='5e770b40'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
<enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
<enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
<enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
<enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
<enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
<enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
<enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
<enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
<enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
<enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
<enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
<enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
<enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
<enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
<enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
<enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
<enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
<enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
<enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
<enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
<enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
<enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
<enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
<enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
<enumerator name='ZPOOL_STATUS_OK' value='32'/>
</enum-decl>
<typedef-decl name='zpool_status_t' type-id='5e770b40' id='d3dd6294'/>
<enum-decl name='zpool_errata' id='d9abbf54'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
<enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
<enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
<enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
<enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
</enum-decl>
<typedef-decl name='zpool_errata_t' type-id='d9abbf54' id='688c495b'/>
+ <pointer-type-def type-id='80f4b756' size-in-bits='64' id='7d3cd834'/>
<pointer-type-def type-id='688c495b' size-in-bits='64' id='cec6f2e4'/>
<function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
<parameter type-id='4c81de99' name='zhp'/>
- <parameter type-id='9b23c9ad' name='msgid'/>
+ <parameter type-id='7d3cd834' name='msgid'/>
<parameter type-id='cec6f2e4' name='errata'/>
<return type-id='d3dd6294'/>
</function-decl>
<function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
<parameter type-id='5ce45b60' name='config'/>
- <parameter type-id='9b23c9ad' name='msgid'/>
+ <parameter type-id='7d3cd834' name='msgid'/>
<parameter type-id='cec6f2e4' name='errata'/>
<return type-id='d3dd6294'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_util.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='192' id='e41bdf22'>
<subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
<type-decl name='variadic parameter type' id='2c1145c5'/>
<array-type-def dimensions='1' type-id='19cefcee' size-in-bits='160' alignment-in-bits='32' id='3fcf57d2'>
<subrange length='5' type-id='7359adad' id='53010e10'/>
</array-type-def>
<enum-decl name='zfs_get_column_t' naming-typedef-id='19cefcee' id='223bdcaa'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='GET_COL_NONE' value='0'/>
<enumerator name='GET_COL_NAME' value='1'/>
<enumerator name='GET_COL_PROPERTY' value='2'/>
<enumerator name='GET_COL_VALUE' value='3'/>
<enumerator name='GET_COL_RECVD' value='4'/>
<enumerator name='GET_COL_SOURCE' value='5'/>
</enum-decl>
<typedef-decl name='zfs_get_column_t' type-id='223bdcaa' id='19cefcee'/>
<class-decl name='vdev_cbdata' size-in-bits='192' is-struct='yes' visibility='default' id='b8006be8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_name_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='cb_names' type-id='9b23c9ad' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='cb_names_count' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='vdev_cbdata_t' type-id='b8006be8' id='a9679c94'/>
<class-decl name='zprop_get_cbdata' size-in-bits='832' is-struct='yes' visibility='default' id='f3d3c319'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_sources' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='cb_columns' type-id='3fcf57d2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='cb_colwidths' type-id='e41bdf22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='cb_scripted' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='cb_literal' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='cb_first' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='cb_proplist' type-id='3a9b2288' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='cb_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='cb_vdevs' type-id='a9679c94' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_get_cbdata_t' type-id='f3d3c319' id='f3d87113'/>
<typedef-decl name='zprop_func' type-id='2e711a2a' id='1ec3747a'/>
<pointer-type-def type-id='9b23c9ad' size-in-bits='64' id='c0563f85'/>
<pointer-type-def type-id='c70fa2e8' size-in-bits='64' id='2e711a2a'/>
<pointer-type-def type-id='f3d87113' size-in-bits='64' id='0d2a0670'/>
<function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_error_action' mangled-name='libzfs_error_action' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_action'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_description'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='95e97e5e' name='error'/>
<parameter type-id='80f4b756' name='msg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='c19b74c3' name='printerr'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='env'/>
<parameter type-id='c0563f85' name='lines'/>
<parameter type-id='7292109c' name='lines_cnt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='env'/>
<parameter type-id='c0563f85' name='lines'/>
<parameter type-id='7292109c' name='lines_cnt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
<parameter type-id='9b23c9ad' name='strs'/>
<parameter type-id='95e97e5e' name='count'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
- <parameter type-id='26a90f95' name='envvar'/>
- <return type-id='95e97e5e'/>
+ <parameter type-id='80f4b756' name='envvar'/>
+ <return type-id='c19b74c3'/>
</function-decl>
<function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='argtype'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='0d2a0670' name='cbp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='value'/>
<parameter type-id='a2256d42' name='sourcetype'/>
<parameter type-id='80f4b756' name='source'/>
<parameter type-id='80f4b756' name='recvd_value'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='value'/>
<parameter type-id='5d6479ae' name='num'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='props'/>
<parameter type-id='e4378506' name='listp'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
<parameter type-id='3a9b2288' name='pl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
<parameter type-id='1ec3747a' name='func'/>
<parameter type-id='eaa32e2f' name='cb'/>
<parameter type-id='c19b74c3' name='show_all'/>
<parameter type-id='c19b74c3' name='ordered'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
- <parameter type-id='26a90f95' name='color'/>
+ <parameter type-id='80f4b756' name='color'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
- <parameter type-id='26a90f95' name='color'/>
- <parameter type-id='26a90f95' name='format'/>
+ <parameter type-id='80f4b756' name='color'/>
+ <parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='c70fa2e8'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_mount_os.c' language='LANG_C99'>
<pointer-type-def type-id='7359adad' size-in-bits='64' id='1d2c2b85'/>
<function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
- <parameter type-id='26a90f95' name='mntopts'/>
+ <parameter type-id='80f4b756' name='mntopts'/>
<parameter type-id='1d2c2b85' name='mntflags'/>
<parameter type-id='1d2c2b85' name='zfsflags'/>
<parameter type-id='95e97e5e' name='sloppy'/>
<parameter type-id='26a90f95' name='badopt'/>
<parameter type-id='26a90f95' name='mtabopt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mntpoint'/>
<parameter type-id='26a90f95' name='mntopts'/>
<parameter type-id='26a90f95' name='mtabopt'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_disable_datasets_os' mangled-name='zpool_disable_datasets_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets_os'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_disable_volume_os' mangled-name='zpool_disable_volume_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_volume_os'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_pool_os.c' language='LANG_C99'>
<function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='name'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_util_os.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32768' id='d16c6df4'>
<subrange length='4096' type-id='7359adad' id='bc1b5ddc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='65536' id='163f6aa5'>
<subrange length='8192' type-id='7359adad' id='c88f397d'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='128' id='c1c22e6c'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='24' id='d3490169'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='09fcdc01'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_magic' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_versioninfo' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_creation_time' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_type' type-id='230f1e16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='drr_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_fromguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_toname' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='3216f820'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zi_objset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zi_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zi_start' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zi_end' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='zi_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='zi_level' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='zi_error' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='zi_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='zi_freq' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='zi_failfast' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='zi_func' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2560'>
<var-decl name='zi_iotype' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2592'>
<var-decl name='zi_duration' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2624'>
<var-decl name='zi_timer' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2688'>
<var-decl name='zi_nlanes' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2752'>
<var-decl name='zi_cmd' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2784'>
<var-decl name='zi_dvas' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zinject_record_t' type-id='3216f820' id='a4301ca6'/>
<class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='feb6f2da'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='z_exportdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='z_sharedata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='z_sharetype' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='z_sharemax' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_share_t' type-id='feb6f2da' id='ee5cec36'/>
<class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='3522cd69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zc_name' type-id='d16c6df4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32768'>
<var-decl name='zc_nvlist_src' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32832'>
<var-decl name='zc_nvlist_src_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32896'>
<var-decl name='zc_nvlist_dst' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32960'>
<var-decl name='zc_nvlist_dst_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33024'>
<var-decl name='zc_nvlist_dst_filled' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33056'>
<var-decl name='zc_pad2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33088'>
<var-decl name='zc_history' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33152'>
<var-decl name='zc_value' type-id='163f6aa5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='98688'>
<var-decl name='zc_string' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100736'>
<var-decl name='zc_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100800'>
<var-decl name='zc_nvlist_conf' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100864'>
<var-decl name='zc_nvlist_conf_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100928'>
<var-decl name='zc_cookie' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100992'>
<var-decl name='zc_objset_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101056'>
<var-decl name='zc_perm_action' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101120'>
<var-decl name='zc_history_len' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101184'>
<var-decl name='zc_history_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101248'>
<var-decl name='zc_obj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101312'>
<var-decl name='zc_iflags' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101376'>
<var-decl name='zc_share' type-id='ee5cec36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101632'>
<var-decl name='zc_objset_stats' type-id='b2c14f17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='103936'>
<var-decl name='zc_begin_record' type-id='09fcdc01' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='106368'>
<var-decl name='zc_inject_record' type-id='a4301ca6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109184'>
<var-decl name='zc_defer_destroy' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109216'>
<var-decl name='zc_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109248'>
<var-decl name='zc_action_handle' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109312'>
<var-decl name='zc_cleanup_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109344'>
<var-decl name='zc_simple' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109352'>
<var-decl name='zc_pad' type-id='d3490169' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109376'>
<var-decl name='zc_sendobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109440'>
<var-decl name='zc_fromobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109504'>
<var-decl name='zc_createtxg' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109568'>
<var-decl name='zc_stat' type-id='0371a9c7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109888'>
<var-decl name='zc_zoneid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_cmd_t' type-id='3522cd69' id='a5559cdd'/>
<class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='6417f0b9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zs_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zs_mode' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zs_links' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zs_ctime' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_stat_t' type-id='6417f0b9' id='0371a9c7'/>
<pointer-type-def type-id='a5559cdd' size-in-bits='64' id='e4ec4540'/>
<function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='95e97e5e' name='request'/>
<parameter type-id='e4ec4540' name='zc'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
<parameter type-id='95e97e5e' name='error'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_destroy_snaps_nvl_os' mangled-name='zfs_destroy_snaps_nvl_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl_os'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_kernel'>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_userns' mangled-name='zfs_userns' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userns'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='nspath'/>
<parameter type-id='95e97e5e' name='attach'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/os/linux/zutil_device_path_os.c' language='LANG_C99'>
<function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_append_partition'>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='b59d7dce' name='max_len'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_partition'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_path'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_enclosure_sysfs_path'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_dev_is_dm' mangled-name='zfs_dev_is_dm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_dm'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_dev_is_whole_disk' mangled-name='zfs_dev_is_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_whole_disk'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='is_mpath_whole_disk' mangled-name='is_mpath_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mpath_whole_disk'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/os/linux/zutil_import_os.c' language='LANG_C99'>
<class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='640b33ca'/>
<pointer-type-def type-id='b99c00c9' size-in-bits='64' id='13956559'/>
<pointer-type-def type-id='b59d7dce' size-in-bits='64' id='78c01427'/>
<pointer-type-def type-id='640b33ca' size-in-bits='64' id='b32bae08'/>
<class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='640b33ca'/>
<function-decl name='zfs_dev_flush' mangled-name='zfs_dev_flush' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_flush'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
<parameter type-id='78c01427' name='count'/>
<return type-id='13956559'/>
</function-decl>
<function-decl name='zfs_device_get_devid' mangled-name='zfs_device_get_devid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_devid'>
<parameter type-id='b32bae08' name='dev'/>
<parameter type-id='26a90f95' name='bufptr'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_device_get_physical' mangled-name='zfs_device_get_physical' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_physical'>
<parameter type-id='b32bae08' name='dev'/>
<parameter type-id='26a90f95' name='bufptr'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk_wait'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='95e97e5e' name='timeout_ms'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
<parameter type-id='5ce45b60' name='nv'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_device_path.c' language='LANG_C99'>
<typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
<typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
<function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_basename'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dirnamelen'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strcmp_pathname'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='80f4b756' name='cmp'/>
<parameter type-id='95e97e5e' name='wholedisk'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_import.c' language='LANG_C99'>
<class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='7ac83801'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='path' type-id='9b23c9ad' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='paths' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='poolname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='cachefile' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='can_be_active' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='scan' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='policy' type-id='5ce45b60' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='importargs_t' type-id='7ac83801' id='7a842a6b'/>
<pointer-type-def type-id='7a842a6b' size-in-bits='64' id='07ee4a58'/>
<pointer-type-def type-id='b1e62775' size-in-bits='64' id='f095e320'/>
<function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_read_label'>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='857bb57e' name='config'/>
<parameter type-id='7292109c' name='num_labels'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_search_import' mangled-name='zpool_search_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_search_import'>
<parameter type-id='eaa32e2f' name='hdl'/>
<parameter type-id='07ee4a58' name='import'/>
<parameter type-id='f095e320' name='pco'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_find_config' mangled-name='zpool_find_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_config'>
<parameter type-id='eaa32e2f' name='hdl'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='857bb57e' name='configp'/>
<parameter type-id='07ee4a58' name='args'/>
<parameter type-id='f095e320' name='pco'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_nicenum.c' language='LANG_C99'>
<enum-decl name='zfs_nicenum_format' id='29cf1969'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_NICENUM_1024' value='0'/>
<enumerator name='ZFS_NICENUM_BYTES' value='1'/>
<enumerator name='ZFS_NICENUM_TIME' value='2'/>
<enumerator name='ZFS_NICENUM_RAW' value='3'/>
<enumerator name='ZFS_NICENUM_RAWTIME' value='4'/>
</enum-decl>
<function-decl name='zfs_isnumber' mangled-name='zfs_isnumber' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_isnumber'>
<parameter type-id='80f4b756' name='str'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_nicenum_format' mangled-name='zfs_nicenum_format' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum_format'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<parameter type-id='29cf1969' name='format'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicetime' mangled-name='zfs_nicetime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicetime'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_niceraw' mangled-name='zfs_niceraw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_niceraw'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicebytes'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_pool.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='853fd5dc' size-in-bits='32768' id='b505fc2f'>
<subrange length='64' type-id='7359adad' id='b10be967'/>
</array-type-def>
<class-decl name='ddt_stat' size-in-bits='512' is-struct='yes' visibility='default' id='65242dfe'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dds_blocks' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='dds_lsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='dds_psize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='dds_dsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='dds_ref_blocks' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='dds_ref_lsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='dds_ref_psize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='dds_ref_dsize' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ddt_stat_t' type-id='65242dfe' id='853fd5dc'/>
<class-decl name='ddt_histogram' size-in-bits='32768' is-struct='yes' visibility='default' id='bc2b3086'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ddh_stat' type-id='b505fc2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ddt_histogram_t' type-id='bc2b3086' id='2d7fe832'/>
<qualified-type-def type-id='2d7fe832' const='yes' id='ec92d602'/>
<pointer-type-def type-id='ec92d602' size-in-bits='64' id='932720f8'/>
<qualified-type-def type-id='853fd5dc' const='yes' id='764c298c'/>
<pointer-type-def type-id='764c298c' size-in-bits='64' id='dfe59052'/>
<pointer-type-def type-id='857bb57e' size-in-bits='64' id='75be733c'/>
<function-decl name='zpool_dump_ddt' mangled-name='zpool_dump_ddt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_dump_ddt'>
<parameter type-id='dfe59052' name='dds_total'/>
<parameter type-id='932720f8' name='ddh'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_history_unpack'>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='9c313c2d' name='bytes_read'/>
<parameter type-id='5d6479ae' name='leftover'/>
<parameter type-id='75be733c' name='records'/>
<parameter type-id='4dd26a40' name='numrecords'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/avl/avl.c' language='LANG_C99'>
<typedef-decl name='avl_index_t' type-id='e475ab95' id='fba6cb51'/>
<pointer-type-def type-id='fba6cb51' size-in-bits='64' id='32adbf30'/>
<pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
<function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='oldnode'/>
<parameter type-id='95e97e5e' name='left'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='fba6cb51' name='where'/>
<parameter type-id='95e97e5e' name='direction'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='value'/>
<parameter type-id='32adbf30' name='where'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_data'/>
<parameter type-id='fba6cb51' name='where'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_data'/>
<parameter type-id='eaa32e2f' name='here'/>
<parameter type-id='95e97e5e' name='direction'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_node'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
<parameter type-id='a3681dea' name='tree1'/>
<parameter type-id='a3681dea' name='tree2'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='585e1de9' name='compar'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='b59d7dce' name='offset'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='63e171df' name='cookie'/>
<return type-id='eaa32e2f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/cityhash.c' language='LANG_C99'>
<function-decl name='cityhash4' mangled-name='cityhash4' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='cityhash4'>
<parameter type-id='9c313c2d' name='w1'/>
<parameter type-id='9c313c2d' name='w2'/>
<parameter type-id='9c313c2d' name='w3'/>
<parameter type-id='9c313c2d' name='w4'/>
<return type-id='9c313c2d'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfeature_common.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='16576' id='9d5e9e2e'>
- <subrange length='37' type-id='7359adad' id='ae666bde'/>
+ <array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='16576' id='d95b2b0b'>
+ <subrange length='37' type-id='7359adad' id='aa6426fb'/>
</array-type-def>
<enum-decl name='spa_feature' id='33ecb627'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SPA_FEATURE_NONE' value='-1'/>
<enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
<enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
<enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
<enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
<enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
<enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
<enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
<enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
<enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
<enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
<enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
<enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
<enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
<enumerator name='SPA_FEATURE_SHA512' value='13'/>
<enumerator name='SPA_FEATURE_SKEIN' value='14'/>
<enumerator name='SPA_FEATURE_EDONR' value='15'/>
<enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
<enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
<enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
<enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
<enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
<enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
<enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
<enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
<enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
<enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
<enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
<enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
<enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
<enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
<enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
<enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
<enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
<enumerator name='SPA_FEATURE_DRAID' value='33'/>
<enumerator name='SPA_FEATURE_ZILSAXATTR' value='34'/>
<enumerator name='SPA_FEATURE_HEAD_ERRLOG' value='35'/>
<enumerator name='SPA_FEATURE_BLAKE3' value='36'/>
<enumerator name='SPA_FEATURES' value='37'/>
</enum-decl>
<typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
<enum-decl name='zfeature_flags' id='6db816a4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFEATURE_FLAG_READONLY_COMPAT' value='1'/>
<enumerator name='ZFEATURE_FLAG_MOS' value='2'/>
<enumerator name='ZFEATURE_FLAG_ACTIVATE_ON_ENABLE' value='4'/>
<enumerator name='ZFEATURE_FLAG_PER_DATASET' value='8'/>
</enum-decl>
<typedef-decl name='zfeature_flags_t' type-id='6db816a4' id='fc329033'/>
<enum-decl name='zfeature_type' id='c4fa2355'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFEATURE_TYPE_BOOLEAN' value='0'/>
<enumerator name='ZFEATURE_TYPE_UINT64_ARRAY' value='1'/>
<enumerator name='ZFEATURE_NUM_TYPES' value='2'/>
</enum-decl>
<typedef-decl name='zfeature_type_t' type-id='c4fa2355' id='732d2bb2'/>
<class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='1178d146'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fi_feature' type-id='d6618c78' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fi_uname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fi_guid' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='fi_desc' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fi_flags' type-id='fc329033' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='fi_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='fi_type' type-id='732d2bb2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='fi_depends' type-id='1acff326' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfeature_info_t' type-id='1178d146' id='83f29ca2'/>
<class-decl name='zfs_mod_supported_features' size-in-bits='128' is-struct='yes' visibility='default' id='3eee3342'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tree' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='all_features' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<qualified-type-def type-id='d6618c78' const='yes' id='81a65028'/>
<pointer-type-def type-id='81a65028' size-in-bits='64' id='1acff326'/>
<qualified-type-def type-id='3eee3342' const='yes' id='0c1d5bbb'/>
<pointer-type-def type-id='0c1d5bbb' size-in-bits='64' id='a3372543'/>
<pointer-type-def type-id='d6618c78' size-in-bits='64' id='a8425263'/>
- <var-decl name='spa_feature_table' type-id='9d5e9e2e' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
+ <var-decl name='spa_feature_table' type-id='d95b2b0b' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
<var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
<function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
<parameter type-id='80f4b756' name='guid'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_guid'>
<parameter type-id='80f4b756' name='guid'/>
<parameter type-id='a8425263' name='res'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='a8425263' name='res'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
<parameter type-id='d6618c78' name='fid'/>
<parameter type-id='d6618c78' name='check'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mod_supported'>
<parameter type-id='80f4b756' name='scope'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_feature_init' mangled-name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_feature_init'>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_comutil.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='2624' id='5ce15418'>
<subrange length='41' type-id='7359adad' id='cb834f44'/>
</array-type-def>
<class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zlp_rewind' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zlp_maxmeta' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zlp_maxdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zlp_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zpool_load_policy_t' type-id='2f65b36f' id='d11b7617'/>
<qualified-type-def type-id='80f4b756' const='yes' id='b99c00c9'/>
<pointer-type-def type-id='d11b7617' size-in-bits='64' id='23432aaa'/>
<var-decl name='zfs_history_event_names' type-id='5ce15418' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
<function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
<parameter type-id='5ce45b60' name='nv'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
<parameter type-id='5ce45b60' name='nv'/>
- <parameter type-id='26a90f95' name='type'/>
+ <parameter type-id='80f4b756' name='type'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
<parameter type-id='5ce45b60' name='nvl'/>
<parameter type-id='23432aaa' name='zlpp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
<parameter type-id='95e97e5e' name='spa_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
<parameter type-id='95e97e5e' name='zpl_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_deleg.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='infinite' id='7c00e69d'>
<subrange length='infinite' id='031f2035'/>
</array-type-def>
<enum-decl name='zfs_deleg_who_type_t' naming-typedef-id='36d4bd5a' id='b5fa5816'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_DELEG_WHO_UNKNOWN' value='0'/>
<enumerator name='ZFS_DELEG_USER' value='117'/>
<enumerator name='ZFS_DELEG_USER_SETS' value='85'/>
<enumerator name='ZFS_DELEG_GROUP' value='103'/>
<enumerator name='ZFS_DELEG_GROUP_SETS' value='71'/>
<enumerator name='ZFS_DELEG_EVERYONE' value='101'/>
<enumerator name='ZFS_DELEG_EVERYONE_SETS' value='69'/>
<enumerator name='ZFS_DELEG_CREATE' value='99'/>
<enumerator name='ZFS_DELEG_CREATE_SETS' value='67'/>
<enumerator name='ZFS_DELEG_NAMED_SET' value='115'/>
<enumerator name='ZFS_DELEG_NAMED_SET_SETS' value='83'/>
</enum-decl>
<typedef-decl name='zfs_deleg_who_type_t' type-id='b5fa5816' id='36d4bd5a'/>
<enum-decl name='zfs_deleg_note_t' naming-typedef-id='4613c173' id='729d4547'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_DELEG_NOTE_CREATE' value='0'/>
<enumerator name='ZFS_DELEG_NOTE_DESTROY' value='1'/>
<enumerator name='ZFS_DELEG_NOTE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_DELEG_NOTE_ROLLBACK' value='3'/>
<enumerator name='ZFS_DELEG_NOTE_CLONE' value='4'/>
<enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
<enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
<enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
<enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
<enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
<enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
<enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
<enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
<enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
<enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
<enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
<enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
<enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
<enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
<enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
<enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
<enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
</enum-decl>
<typedef-decl name='zfs_deleg_note_t' type-id='729d4547' id='4613c173'/>
<class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='5aa05c1f'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_perm' type-id='26a90f95' visibility='default'/>
+ <var-decl name='z_perm' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='z_note' type-id='4613c173' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_deleg_perm_tab_t' type-id='5aa05c1f' id='f3f851ad'/>
<qualified-type-def type-id='f3f851ad' const='yes' id='fa1870fd'/>
<var-decl name='zfs_deleg_perm_tab' type-id='7c00e69d' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
<function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
<parameter type-id='80f4b756' name='perm'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
<parameter type-id='5ce45b60' name='nvp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_deleg_whokey' mangled-name='zfs_deleg_whokey' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_whokey'>
<parameter type-id='26a90f95' name='attr'/>
<parameter type-id='36d4bd5a' name='type'/>
<parameter type-id='a84c031d' name='inheritchr'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='256' id='85c64d26'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='512' id='c5d13f42'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<array-type-def dimensions='1' type-id='90dbb6d6' size-in-bits='2048' id='16582e69'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='8240361c' size-in-bits='1024' id='481f90b1'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='7c1ab40c' size-in-bits='512' id='cbd91ec1'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='6d059eaa' size-in-bits='1024' id='729b6ebb'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='1d53e28b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zc_word' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_cksum_t' type-id='1d53e28b' id='39730d0b'/>
<enum-decl name='zio_byteorder_t' naming-typedef-id='595a65ec' id='fc861be0'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZIO_CHECKSUM_NATIVE' value='0'/>
<enumerator name='ZIO_CHECKSUM_BYTESWAP' value='1'/>
</enum-decl>
<typedef-decl name='zio_byteorder_t' type-id='fc861be0' id='595a65ec'/>
<class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='4bf4b004'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='acd_byteorder' type-id='595a65ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='acd_ctx' type-id='0f7df99e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='acd_zcp' type-id='c24fc2ee' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='acd_private' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_abd_checksum_data_t' type-id='4bf4b004' id='74e39470'/>
<typedef-decl name='zio_abd_checksum_init_t' type-id='a5444274' id='029a8ebe'/>
<typedef-decl name='zio_abd_checksum_fini_t' type-id='a5444274' id='d6fd5c6c'/>
<typedef-decl name='zio_abd_checksum_iter_t' type-id='f4a1892e' id='cefa0f4a'/>
<class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='aa14691a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='acf_init' type-id='0bcca125' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='acf_fini' type-id='bfe36153' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='acf_iter' type-id='1e276399' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_abd_checksum_func_t' type-id='3f8e8d11' id='c2eb138a'/>
<class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='28efb250'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_superscalar_t' type-id='28efb250' id='6d059eaa'/>
<class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='acd4019a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_sse_t' type-id='acd4019a' id='7c1ab40c'/>
<class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='8c208dfa'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_avx_t' type-id='8c208dfa' id='8240361c'/>
<class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='c6d0c382'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='c5d13f42' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_avx512_t' type-id='c6d0c382' id='90dbb6d6'/>
<union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='1f951ade'>
<data-member access='public'>
<var-decl name='scalar' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='superscalar' type-id='729b6ebb' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sse' type-id='cbd91ec1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='avx' type-id='481f90b1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='avx512' type-id='16582e69' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='fletcher_4_ctx_t' type-id='1f951ade' id='4b675395'/>
<qualified-type-def type-id='aa14691a' const='yes' id='3f8e8d11'/>
<pointer-type-def type-id='4b675395' size-in-bits='64' id='0f7df99e'/>
<pointer-type-def type-id='74e39470' size-in-bits='64' id='eefe7427'/>
<pointer-type-def type-id='d6fd5c6c' size-in-bits='64' id='bfe36153'/>
<pointer-type-def type-id='029a8ebe' size-in-bits='64' id='0bcca125'/>
<pointer-type-def type-id='cefa0f4a' size-in-bits='64' id='1e276399'/>
<pointer-type-def type-id='39730d0b' size-in-bits='64' id='c24fc2ee'/>
<var-decl name='fletcher_4_abd_ops' type-id='c2eb138a' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
<function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_2_byteswap' mangled-name='fletcher_2_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
<parameter type-id='80f4b756' name='val'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_byteswap' mangled-name='fletcher_4_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_init' mangled-name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='f4a1892e'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='a5444274'>
<parameter type-id='eefe7427'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_avx512.c' language='LANG_C99'>
<typedef-decl name='fletcher_4_init_f' type-id='173aa527' id='b9ae1656'/>
<typedef-decl name='fletcher_4_fini_f' type-id='0ad5b8a8' id='c4c1f4fc'/>
<typedef-decl name='fletcher_4_compute_f' type-id='38147eff' id='ad1dc4cb'/>
<class-decl name='fletcher_4_func' size-in-bits='512' is-struct='yes' visibility='default' id='57f479a0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='init_native' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fini_native' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='compute_native' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='init_byteswap' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fini_byteswap' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='compute_byteswap' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='valid' type-id='297d38bc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='name' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='fletcher_4_ops_t' type-id='57f479a0' id='eba91718'/>
<qualified-type-def type-id='eba91718' const='yes' id='9eeabdc8'/>
<pointer-type-def type-id='e9e61702' size-in-bits='64' id='297d38bc'/>
<pointer-type-def type-id='fe40251b' size-in-bits='64' id='173aa527'/>
<pointer-type-def type-id='17fb1f83' size-in-bits='64' id='38147eff'/>
<pointer-type-def type-id='fb39e25e' size-in-bits='64' id='0ad5b8a8'/>
<var-decl name='fletcher_4_avx512f_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
<var-decl name='fletcher_4_avx512bw_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
<function-type size-in-bits='64' id='e9e61702'>
<return type-id='c19b74c3'/>
</function-type>
<function-type size-in-bits='64' id='fe40251b'>
<parameter type-id='0f7df99e'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='17fb1f83'>
<parameter type-id='0f7df99e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='fb39e25e'>
<parameter type-id='0f7df99e'/>
<parameter type-id='c24fc2ee'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_intel.c' language='LANG_C99'>
<var-decl name='fletcher_4_avx2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_sse.c' language='LANG_C99'>
<var-decl name='fletcher_4_sse2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
<var-decl name='fletcher_4_ssse3_ops' type-id='9eeabdc8' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_superscalar.c' language='LANG_C99'>
<var-decl name='fletcher_4_superscalar_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_superscalar4.c' language='LANG_C99'>
<var-decl name='fletcher_4_superscalar4_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_namecheck.c' language='LANG_C99'>
<enum-decl name='namecheck_err_t' naming-typedef-id='8e0af06e' id='f43bbcda'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
<enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
<enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
<enumerator name='NAME_ERR_INVALCHAR' value='3'/>
<enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
<enumerator name='NAME_ERR_NOLETTER' value='5'/>
<enumerator name='NAME_ERR_RESERVED' value='6'/>
<enumerator name='NAME_ERR_DISKLIKE' value='7'/>
<enumerator name='NAME_ERR_TOOLONG' value='8'/>
<enumerator name='NAME_ERR_SELF_REF' value='9'/>
<enumerator name='NAME_ERR_PARENT_REF' value='10'/>
<enumerator name='NAME_ERR_NO_AT' value='11'/>
<enumerator name='NAME_ERR_NO_POUND' value='12'/>
</enum-decl>
<typedef-decl name='namecheck_err_t' type-id='f43bbcda' id='8e0af06e'/>
<pointer-type-def type-id='8e0af06e' size-in-bits='64' id='053457bd'/>
<var-decl name='zfs_max_dataset_nesting' type-id='95e97e5e' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
<function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_nestcheck'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='dataset_namecheck' mangled-name='dataset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
<parameter type-id='80f4b756' name='pool'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_prop.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='768' id='bcc77e38'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<enum-decl name='zprop_type_t' naming-typedef-id='31429eff' id='87676253'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROP_TYPE_NUMBER' value='0'/>
<enumerator name='PROP_TYPE_STRING' value='1'/>
<enumerator name='PROP_TYPE_INDEX' value='2'/>
</enum-decl>
<typedef-decl name='zprop_type_t' type-id='87676253' id='31429eff'/>
<enum-decl name='zprop_attr_t' naming-typedef-id='999701cc' id='77d05200'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROP_DEFAULT' value='0'/>
<enumerator name='PROP_READONLY' value='1'/>
<enumerator name='PROP_INHERIT' value='2'/>
<enumerator name='PROP_ONETIME' value='3'/>
<enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
</enum-decl>
<typedef-decl name='zprop_attr_t' type-id='77d05200' id='999701cc'/>
<class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='87957af9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pi_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pi_value' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_index_t' type-id='87957af9' id='64636ce3'/>
<class-decl name='zprop_desc_t' size-in-bits='640' is-struct='yes' naming-typedef-id='ffa52b96' visibility='default' id='bbff5e4b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pd_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pd_propnum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='pd_proptype' type-id='31429eff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pd_strdefault' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pd_numdefault' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pd_attr' type-id='999701cc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='pd_types' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pd_values' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='pd_colname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='pd_rightalign' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='449'>
<var-decl name='pd_visible' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='450'>
<var-decl name='pd_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='451'>
<var-decl name='pd_always_flex' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='pd_table' type-id='c8bc397b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='pd_table_size' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_desc_t' type-id='bbff5e4b' id='ffa52b96'/>
- <pointer-type-def type-id='80f4b756' size-in-bits='64' id='7d3cd834'/>
<qualified-type-def type-id='64636ce3' const='yes' id='072f7953'/>
<pointer-type-def type-id='072f7953' size-in-bits='64' id='c8bc397b'/>
<pointer-type-def type-id='ffa52b96' size-in-bits='64' id='76c8174b'/>
<var-decl name='zfs_userquota_prop_prefixes' type-id='bcc77e38' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
<function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='zfs_prop_init' mangled-name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
<parameter type-id='80f4b756' name='propname'/>
<return type-id='58603c44'/>
</function-decl>
<function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_string_to_index' mangled-name='zfs_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_string_to_index'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='9c313c2d' name='index'/>
<parameter type-id='7d3cd834' name='string'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='2e45de5d' name='types'/>
<parameter type-id='c19b74c3' name='headcheck'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
<parameter type-id='80f4b756' name='str'/>
<parameter type-id='c19b74c3' name='encrypted'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zpool_prop.c' language='LANG_C99'>
<function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='zpool_prop_init' mangled-name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
<parameter type-id='80f4b756' name='propname'/>
<return type-id='5d0c23fb'/>
</function-decl>
<function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_readonly'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_unsupported'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='9c313c2d' name='index'/>
<parameter type-id='7d3cd834' name='string'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='vdev_prop_get_table' mangled-name='vdev_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='vdev_prop_init' mangled-name='vdev_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='vdev_name_to_prop' mangled-name='vdev_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_name_to_prop'>
<parameter type-id='80f4b756' name='propname'/>
<return type-id='5aa5c90c'/>
</function-decl>
<function-decl name='vdev_prop_user' mangled-name='vdev_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_user'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='vdev_prop_to_name' mangled-name='vdev_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_to_name'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_get_type' mangled-name='vdev_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_get_type'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='vdev_prop_readonly' mangled-name='vdev_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_readonly'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='vdev_prop_default_string' mangled-name='vdev_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_default_string'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_default_numeric' mangled-name='vdev_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_default_numeric'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='vdev_prop_string_to_index' mangled-name='vdev_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_string_to_index'>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vdev_prop_index_to_string' mangled-name='vdev_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_index_to_string'>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='9c313c2d' name='index'/>
<parameter type-id='7d3cd834' name='string'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_vdev' mangled-name='zpool_prop_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_vdev'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='vdev_prop_random_value' mangled-name='vdev_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_random_value'>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='vdev_prop_values' mangled-name='vdev_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_values'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_column_name' mangled-name='vdev_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_column_name'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_align_right' mangled-name='vdev_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_align_right'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zprop_common.c' language='LANG_C99'>
<function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='31429eff' name='type'/>
<parameter type-id='9c313c2d' name='numdefault'/>
<parameter type-id='80f4b756' name='strdefault'/>
<parameter type-id='999701cc' name='attr'/>
<parameter type-id='95e97e5e' name='objset_types'/>
<parameter type-id='80f4b756' name='values'/>
<parameter type-id='80f4b756' name='colname'/>
<parameter type-id='c19b74c3' name='rightalign'/>
<parameter type-id='c19b74c3' name='visible'/>
<parameter type-id='c19b74c3' name='flex'/>
<parameter type-id='c8bc397b' name='idx_tbl'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='80f4b756' name='def'/>
<parameter type-id='999701cc' name='attr'/>
<parameter type-id='95e97e5e' name='objset_types'/>
<parameter type-id='80f4b756' name='values'/>
<parameter type-id='80f4b756' name='colname'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_number'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='9c313c2d' name='def'/>
<parameter type-id='999701cc' name='attr'/>
<parameter type-id='95e97e5e' name='objset_types'/>
<parameter type-id='80f4b756' name='values'/>
<parameter type-id='80f4b756' name='colname'/>
<parameter type-id='c19b74c3' name='flex'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='9c313c2d' name='def'/>
<parameter type-id='999701cc' name='attr'/>
<parameter type-id='95e97e5e' name='objset_types'/>
<parameter type-id='80f4b756' name='values'/>
<parameter type-id='80f4b756' name='colname'/>
<parameter type-id='c8bc397b' name='idx_tbl'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='31429eff' name='type'/>
<parameter type-id='999701cc' name='attr'/>
<parameter type-id='95e97e5e' name='objset_types'/>
<parameter type-id='80f4b756' name='colname'/>
<parameter type-id='c19b74c3' name='flex'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
<parameter type-id='1ec3747a' name='func'/>
<parameter type-id='eaa32e2f' name='cb'/>
<parameter type-id='c19b74c3' name='show_all'/>
<parameter type-id='c19b74c3' name='ordered'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='9c313c2d' name='index'/>
<parameter type-id='7d3cd834' name='string'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='c19b74c3' name='headcheck'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zprop_valid_char' mangled-name='zprop_valid_char' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_char'>
<parameter type-id='a84c031d' name='c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
<parameter type-id='95e97e5e' name='prop'/>
<parameter type-id='37e3bd22' name='fixed'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
index fdfdd8d2808a..44f7d698c82c 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
@@ -1,1452 +1,1462 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2021 by Delphix. All rights reserved.
+ * Copyright (c) 2014, 2022 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017 RackTop Systems.
* Copyright (c) 2018 Datto Inc.
* Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/*
* Routines to manage ZFS mounts. We separate all the nasty routines that have
* to deal with the OS. The following functions are the main entry points --
* they are used by mount and unmount and when changing a filesystem's
* mountpoint.
*
* zfs_is_mounted()
* zfs_mount()
* zfs_mount_at()
* zfs_unmount()
* zfs_unmountall()
*
* This file also contains the functions used to manage sharing filesystems:
*
* zfs_is_shared()
* zfs_share()
* zfs_unshare()
* zfs_unshareall()
* zfs_commit_shares()
*
* The following functions are available for pool consumers, and will
* mount/unmount and share/unshare all datasets within pool:
*
* zpool_enable_datasets()
* zpool_disable_datasets()
*/
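/*
 * A minimal usage sketch of the entry points above (the dataset name
 * "tank/home" is assumed purely for illustration; error handling is
 * omitted):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL) {
 *		if (!zfs_is_mounted(zhp, NULL))
 *			(void) zfs_mount(zhp, NULL, 0);
 *		zfs_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */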
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include "libzfs_impl.h"
#include <thread_pool.h>
#include <libshare.h>
#include <sys/systeminfo.h>
#define MAXISALEN 257 /* based on sysinfo(2) man page */
static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */
static void zfs_mount_task(void *);
static const proto_table_t proto_table[SA_PROTOCOL_COUNT] = {
[SA_PROTOCOL_NFS] =
{ZFS_PROP_SHARENFS, EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
[SA_PROTOCOL_SMB] =
{ZFS_PROP_SHARESMB, EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
};
static const enum sa_protocol share_all_proto[SA_PROTOCOL_COUNT + 1] = {
SA_PROTOCOL_NFS,
SA_PROTOCOL_SMB,
SA_NO_PROTOCOL
};
static boolean_t
dir_is_empty_stat(const char *dirname)
{
struct stat st;
/*
* We only want to return false if the given path is a non-empty
* directory; all other errors are handled elsewhere.
*/
if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
return (B_TRUE);
}
/*
* An empty directory will still have two entries in it, one
* entry for each of "." and "..".
*/
if (st.st_size > 2) {
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
dir_is_empty_readdir(const char *dirname)
{
DIR *dirp;
struct dirent64 *dp;
int dirfd;
if ((dirfd = openat(AT_FDCWD, dirname,
O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
return (B_TRUE);
}
if ((dirp = fdopendir(dirfd)) == NULL) {
(void) close(dirfd);
return (B_TRUE);
}
while ((dp = readdir64(dirp)) != NULL) {
if (strcmp(dp->d_name, ".") == 0 ||
strcmp(dp->d_name, "..") == 0)
continue;
(void) closedir(dirp);
return (B_FALSE);
}
(void) closedir(dirp);
return (B_TRUE);
}
/*
* Returns true if the specified directory is empty. If we can't open the
* directory at all, return true so that the mount can fail with a more
* informative error message.
*/
static boolean_t
dir_is_empty(const char *dirname)
{
struct statfs64 st;
/*
* If the statfs call fails or the filesystem is not a ZFS
* filesystem, fall back to the slow path, which uses readdir.
*/
if ((statfs64(dirname, &st) != 0) ||
(st.f_type != ZFS_SUPER_MAGIC)) {
return (dir_is_empty_readdir(dirname));
}
/*
* At this point, we know the provided path is on a ZFS
* filesystem, so we can use stat instead of readdir to
* determine if the directory is empty or not. We try to avoid
* using readdir because that requires opening "dirname"; this
* open file descriptor can potentially end up in a child
* process if there's a concurrent fork, thus preventing the
* zfs_mount() from otherwise succeeding (the open file
* descriptor inherited by the child process will cause the
* parent's mount to fail with EBUSY). The performance
* implications of replacing the open, read, and close with a
* single stat are nice, but are not the main motivation for the
* added complexity.
*/
return (dir_is_empty_stat(dirname));
}
/*
* Checks to see if the mount is active. If the filesystem is mounted, we fill
* in 'where' with the current mountpoint, and return 1. Otherwise, we return
* 0.
*/
boolean_t
is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
{
struct mnttab entry;
if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
return (B_FALSE);
if (where != NULL)
*where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
return (B_TRUE);
}
boolean_t
zfs_is_mounted(zfs_handle_t *zhp, char **where)
{
return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
}
/*
* Returns true if no higher-order concerns prevent mounting the given
* dataset, false otherwise. zfs_is_mountable_internal() specifically assumes
* that the caller has already verified the sanity of mounting the dataset at
* its mountpoint to whatever extent the caller wants.
*/
static boolean_t
zfs_is_mountable_internal(zfs_handle_t *zhp)
{
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
getzoneid() == GLOBAL_ZONEID)
return (B_FALSE);
return (B_TRUE);
}
/*
* Returns true if the given dataset is mountable, false otherwise. Returns the
* mountpoint in 'buf'.
*/
static boolean_t
zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
zprop_source_t *source, int flags)
{
char sourceloc[MAXNAMELEN];
zprop_source_t sourcetype;
if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type,
B_FALSE))
return (B_FALSE);
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
&sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
return (B_FALSE);
if (!zfs_is_mountable_internal(zhp))
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE))
return (B_FALSE);
if (source)
*source = sourcetype;
return (B_TRUE);
}
/*
* The filesystem is mounted by invoking the system mount utility rather
* than by the system call mount(2). This ensures that the /etc/mtab
* file is correctly locked for the update. Performing our own locking
* and /etc/mtab update requires making an unsafe assumption about how
* the mount utility performs its locking. Unfortunately, this also means
* that, in the case of a mount failure, we do not have the exact errno. We
* must make do with the return value from the mount process.
*
* In the long term, a shared library called libmount is under development
* that provides a common API to address the locking and errno issues.
* Once the standard mount utility has been updated to use this library,
* we can add an autoconf check to conditionally use it.
*
* http://www.kernel.org/pub/linux/utils/util-linux/libmount-docs/index.html
*/
static int
zfs_add_option(zfs_handle_t *zhp, char *options, int len,
zfs_prop_t prop, const char *on, const char *off)
{
char *source;
uint64_t value;
/* Skip adding duplicate default options */
if ((strstr(options, on) != NULL) || (strstr(options, off) != NULL))
return (0);
/*
* zfs_prop_get_int() is not used here, so that our mount options
* are not influenced by the current /proc/self/mounts contents.
*/
value = getprop_uint64(zhp, prop, &source);
(void) strlcat(options, ",", len);
(void) strlcat(options, value ? on : off, len);
return (0);
}
static int
zfs_add_options(zfs_handle_t *zhp, char *options, int len)
{
int error = 0;
error = zfs_add_option(zhp, options, len,
ZFS_PROP_ATIME, MNTOPT_ATIME, MNTOPT_NOATIME);
/*
* Don't add relatime/strictatime when atime=off; otherwise strictatime
* will force atime=on.
*/
if (strstr(options, MNTOPT_NOATIME) == NULL) {
error = zfs_add_option(zhp, options, len,
ZFS_PROP_RELATIME, MNTOPT_RELATIME, MNTOPT_STRICTATIME);
}
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_DEVICES, MNTOPT_DEVICES, MNTOPT_NODEVICES);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_EXEC, MNTOPT_EXEC, MNTOPT_NOEXEC);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_READONLY, MNTOPT_RO, MNTOPT_RW);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_SETUID, MNTOPT_SETUID, MNTOPT_NOSETUID);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_NBMAND, MNTOPT_NBMAND, MNTOPT_NONBMAND);
return (error);
}
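/*
 * Illustrative sketch (not part of libzfs): zfs_add_options() simply grows
 * the option string by strlcat() in property order, starting from the
 * defaults string passed in by zfs_mount_at().  A stand-alone helper using
 * the same append pattern might look like this (example_* is hypothetical):
 */
static void
example_append_option(char *options, size_t len, boolean_t value,
    const char *on, const char *off)
{
        /* Skip duplicates, then append ",<option>" exactly as above. */
        if (strstr(options, on) != NULL || strstr(options, off) != NULL)
                return;
        (void) strlcat(options, ",", len);
        (void) strlcat(options, value ? on : off, len);
}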
int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
char mountpoint[ZFS_MAXPROPLEN];
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL,
flags))
return (0);
return (zfs_mount_at(zhp, options, flags, mountpoint));
}
/*
* Mount the given filesystem.
*/
int
zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
const char *mountpoint)
{
struct stat buf;
char mntopts[MNT_LINE_MAX];
char overlay[ZFS_MAXPROPLEN];
char prop_encroot[MAXNAMELEN];
boolean_t is_encroot;
zfs_handle_t *encroot_hp = zhp;
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t keystatus;
int remount = 0, rc;
if (options == NULL) {
(void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
} else {
(void) strlcpy(mntopts, options, sizeof (mntopts));
}
if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
remount = 1;
/* Potentially duplicates some checks if invoked by zfs_mount(). */
if (!zfs_is_mountable_internal(zhp))
return (0);
/*
* If the pool is imported read-only then all mounts must be read-only
*/
if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
(void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));
/*
* Append default mount options which apply to the mount point.
* This is done because under Linux (unlike Solaris) multiple mount
* points may reference a single super block. This means that just
* given a super block there is no back reference to update the per
* mount point options.
*/
rc = zfs_add_options(zhp, mntopts, sizeof (mntopts));
if (rc) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"default options unavailable"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
/*
* If the filesystem is encrypted the key must be loaded in order to
* mount. If the key isn't loaded, the MS_CRYPT flag decides whether
* or not we attempt to load the keys. Note: we must call
* zfs_refresh_properties() here since some callers of this function
* (most notably zpool_enable_datasets()) may implicitly load our key
* by loading the parent's key first.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
zfs_refresh_properties(zhp);
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If the key is unavailable and MS_CRYPT is set give the
* user a chance to enter the key. Otherwise just fail
* immediately.
*/
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
if (flags & MS_CRYPT) {
rc = zfs_crypto_get_encryption_root(zhp,
&is_encroot, prop_encroot);
if (rc) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for "
"'%s'."), zfs_get_name(zhp));
return (rc);
}
if (!is_encroot) {
encroot_hp = zfs_open(hdl, prop_encroot,
ZFS_TYPE_DATASET);
if (encroot_hp == NULL)
return (hdl->libzfs_error);
}
rc = zfs_crypto_load_key(encroot_hp,
B_FALSE, NULL);
if (!is_encroot)
zfs_close(encroot_hp);
if (rc)
return (rc);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key not loaded"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
}
}
/*
* Append the zfsutil option so the mount helper allows the mount
*/
strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));
/* Create the directory if it doesn't already exist */
if (lstat(mountpoint, &buf) != 0) {
if (mkdirp(mountpoint, 0755) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create mountpoint: %s"),
strerror(errno));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
}
/*
* Overlay mounts are enabled by default but may be disabled
* via the 'overlay' property. The -O flag remains for compatibility.
*/
if (!(flags & MS_OVERLAY)) {
if (zfs_prop_get(zhp, ZFS_PROP_OVERLAY, overlay,
sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(overlay, "on") == 0) {
flags |= MS_OVERLAY;
}
}
}
/*
* Determine if the mountpoint is empty. If it is not, refuse to perform
* the mount. We don't perform this check if 'remount' is specified or
* if the overlay option (-O) is given.
*/
if ((flags & MS_OVERLAY) == 0 && !remount &&
!dir_is_empty(mountpoint)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"directory is not empty"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
}
/* perform the mount */
rc = do_mount(zhp, mountpoint, mntopts, flags);
if (rc) {
/*
* Generic errors are nasty, but there are just way too many
* from mount(), and they're well-understood. We pick a few
* common ones to improve upon.
*/
if (rc == EBUSY) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mountpoint or dataset is busy"));
} else if (rc == EPERM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Insufficient privileges"));
} else if (rc == ENOTSUP) {
int spa_version;
VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can't mount a version %llu "
"file system on a version %d pool. Pool must be"
" upgraded to mount this file system."),
(u_longlong_t)zfs_prop_get_int(zhp,
ZFS_PROP_VERSION), spa_version);
} else {
zfs_error_aux(hdl, "%s", strerror(rc));
}
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
zhp->zfs_name));
}
/* remove the mounted entry before re-adding on remount */
if (remount)
libzfs_mnttab_remove(hdl, zhp->zfs_name);
/* add the mounted entry into our cache */
libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
return (0);
}
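/*
 * Illustrative sketch (not part of libzfs): mounting a dataset at its
 * configured mountpoint with default options and reporting the failure
 * recorded on the handle.  example_mount_default() is hypothetical;
 * fprintf() assumes <stdio.h>.
 */
static int
example_mount_default(zfs_handle_t *zhp)
{
        /* NULL options -> MNTOPT_DEFAULTS; flags 0 -> no overlay/force. */
        if (zfs_mount(zhp, NULL, 0) != 0) {
                (void) fprintf(stderr, "mount failed: %s\n",
                    libzfs_error_description(zhp->zfs_hdl));
                return (-1);
        }
        return (0);
}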
/*
* Unmount a single filesystem.
*/
static int
unmount_one(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
int error;
error = do_unmount(zhp, mountpoint, flags);
if (error != 0) {
int libzfs_err;
switch (error) {
case EBUSY:
libzfs_err = EZFS_BUSY;
break;
case EIO:
libzfs_err = EZFS_IO;
break;
case ENOENT:
libzfs_err = EZFS_NOENT;
break;
case ENOMEM:
libzfs_err = EZFS_NOMEM;
break;
case EPERM:
libzfs_err = EZFS_PERM;
break;
default:
libzfs_err = EZFS_UMOUNTFAILED;
}
if (zhp) {
return (zfs_error_fmt(zhp->zfs_hdl, libzfs_err,
dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
mountpoint));
} else {
return (-1);
}
}
return (0);
}
/*
* Unmount the given filesystem.
*/
int
zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
char *mntpt = NULL;
boolean_t encroot, unmounted = B_FALSE;
/* check to see if we need to unmount the filesystem */
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
/*
* mountpoint may have come from a call to
* getmnt/getmntany if it isn't NULL. If it is NULL,
* we know it comes from libzfs_mnttab_find(), whose
* entry may be freed later, so we strdup it to be safe.
*/
if (mountpoint == NULL)
mntpt = zfs_strdup(hdl, entry.mnt_mountp);
else
mntpt = zfs_strdup(hdl, mountpoint);
/*
* Unshare and unmount the filesystem
*/
if (zfs_unshare(zhp, mntpt, share_all_proto) != 0) {
free(mntpt);
return (-1);
}
zfs_commit_shares(NULL);
if (unmount_one(zhp, mntpt, flags) != 0) {
free(mntpt);
(void) zfs_share(zhp, NULL);
zfs_commit_shares(NULL);
return (-1);
}
libzfs_mnttab_remove(hdl, zhp->zfs_name);
free(mntpt);
unmounted = B_TRUE;
}
/*
* If the MS_CRYPT flag is provided we must ensure we attempt to
* unload the dataset's key regardless of whether we did any work
* to unmount it. We only do this for encryption roots.
*/
if ((flags & MS_CRYPT) != 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
zfs_refresh_properties(zhp);
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0 &&
unmounted) {
(void) zfs_mount(zhp, NULL, 0);
return (-1);
}
if (encroot && zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_AVAILABLE &&
zfs_crypto_unload_key(zhp) != 0) {
(void) zfs_mount(zhp, NULL, 0);
return (-1);
}
}
zpool_disable_volume_os(zhp->zfs_name);
return (0);
}
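/*
 * Illustrative sketch (not part of libzfs): unmounting a dataset while also
 * unloading its key, per the MS_CRYPT handling above.  example_* is a
 * hypothetical name.
 */
static int
example_unmount_and_unload_key(zfs_handle_t *zhp)
{
        /* NULL mountpoint -> look it up in the libzfs mnttab cache. */
        return (zfs_unmount(zhp, NULL, MS_CRYPT));
}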
/*
* Unmount this filesystem and any children inheriting the mountpoint property.
* To do this, just act like we're changing the mountpoint property, but don't
* remount the filesystems afterwards.
*/
int
zfs_unmountall(zfs_handle_t *zhp, int flags)
{
prop_changelist_t *clp;
int ret;
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_ITER_MOUNTED, flags);
if (clp == NULL)
return (-1);
ret = changelist_prefix(clp);
changelist_free(clp);
return (ret);
}
/*
* Unshare a filesystem by mountpoint.
*/
static int
unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
enum sa_protocol proto)
{
int err = sa_disable_share(mountpoint, proto);
if (err != SA_OK)
return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
name, sa_errorstr(err)));
return (0);
}
/*
* Share the given filesystem according to the options in the specified
* protocol-specific properties (sharenfs, sharesmb). We rely
* on "libshare" to do the dirty work for us.
*/
int
zfs_share(zfs_handle_t *zhp, const enum sa_protocol *proto)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char sourcestr[ZFS_MAXPROPLEN];
const enum sa_protocol *curr_proto;
zprop_source_t sourcetype;
int err = 0;
if (proto == NULL)
proto = share_all_proto;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL, 0))
return (0);
for (curr_proto = proto; *curr_proto != SA_NO_PROTOCOL; curr_proto++) {
/*
* Return success if there are no share options.
*/
if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
shareopts, sizeof (shareopts), &sourcetype, sourcestr,
ZFS_MAXPROPLEN, B_FALSE) != 0 ||
strcmp(shareopts, "off") == 0)
continue;
/*
* If the 'zoned' property is set, then zfs_is_mountable()
* will have already bailed out if we are in the global zone.
* But local zones cannot be NFS servers, so we ignore it for
* local zones as well.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
continue;
err = sa_enable_share(zfs_get_name(zhp), mountpoint, shareopts,
*curr_proto);
if (err != SA_OK) {
return (zfs_error_fmt(zhp->zfs_hdl,
proto_table[*curr_proto].p_share_err,
dgettext(TEXT_DOMAIN, "cannot share '%s: %s'"),
zfs_get_name(zhp), sa_errorstr(err)));
}
}
return (0);
}
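/*
 * Illustrative sketch (not part of libzfs): sharing a dataset over a single
 * protocol.  The example assumes the libshare enumerator SA_PROTOCOL_NFS;
 * the protocol list must be terminated by SA_NO_PROTOCOL, as zfs_share()
 * and zfs_commit_shares() expect.  example_* is a hypothetical name.
 */
static int
example_share_nfs_only(zfs_handle_t *zhp)
{
        const enum sa_protocol nfs_only[] =
            { SA_PROTOCOL_NFS, SA_NO_PROTOCOL };

        if (zfs_share(zhp, nfs_only) != 0)
                return (-1);
        zfs_commit_shares(nfs_only);
        return (0);
}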
/*
* Check to see if the filesystem is currently shared.
*/
boolean_t
zfs_is_shared(zfs_handle_t *zhp, char **where,
const enum sa_protocol *proto)
{
char *mountpoint;
if (proto == NULL)
proto = share_all_proto;
if (ZFS_IS_VOLUME(zhp))
return (B_FALSE);
if (!zfs_is_mounted(zhp, &mountpoint))
return (B_FALSE);
for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
if (sa_is_shared(mountpoint, *p)) {
if (where != NULL)
*where = mountpoint;
else
free(mountpoint);
return (B_TRUE);
}
free(mountpoint);
return (B_FALSE);
}
void
zfs_commit_shares(const enum sa_protocol *proto)
{
if (proto == NULL)
proto = share_all_proto;
for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
sa_commit_shares(*p);
}
+void
+zfs_truncate_shares(const enum sa_protocol *proto)
+{
+ if (proto == NULL)
+ proto = share_all_proto;
+
+ for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
+ sa_truncate_shares(*p);
+}
+
/*
* Unshare the given filesystem.
*/
int
zfs_unshare(zfs_handle_t *zhp, const char *mountpoint,
const enum sa_protocol *proto)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (proto == NULL)
proto = share_all_proto;
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
/* check to see if we need to unshare the filesystem */
const char *mntpt = mountpoint ?: entry.mnt_mountp;
for (const enum sa_protocol *curr_proto = proto;
*curr_proto != SA_NO_PROTOCOL; curr_proto++)
if (sa_is_shared(mntpt, *curr_proto) &&
unshare_one(hdl, zhp->zfs_name,
mntpt, *curr_proto) != 0)
return (-1);
}
return (0);
}
/*
* Same as zfs_unmountall(), but for NFS and SMB unshares.
*/
int
zfs_unshareall(zfs_handle_t *zhp, const enum sa_protocol *proto)
{
prop_changelist_t *clp;
int ret;
if (proto == NULL)
proto = share_all_proto;
clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
if (clp == NULL)
return (-1);
ret = changelist_unshare(clp, proto);
changelist_free(clp);
return (ret);
}
/*
* Remove the mountpoint associated with the current dataset, if necessary.
* We only remove the underlying directory if:
*
* - The mountpoint is not 'none' or 'legacy'
* - The mountpoint is non-empty
* - The mountpoint is the default or inherited
* - The 'zoned' property is set, or we're in a local zone
*
* Any other directories we leave alone.
*/
void
remove_mountpoint(zfs_handle_t *zhp)
{
char mountpoint[ZFS_MAXPROPLEN];
zprop_source_t source;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
&source, 0))
return;
if (source == ZPROP_SRC_DEFAULT ||
source == ZPROP_SRC_INHERITED) {
/*
* Try to remove the directory, silently ignoring any errors.
* The filesystem may have since been removed or moved around,
* and this error isn't really useful to the administrator in
* any way.
*/
(void) rmdir(mountpoint);
}
}
/*
* Add the given zfs handle to the cb_handles array, dynamically reallocating
* the array if it is out of space.
*/
void
libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
{
if (cbp->cb_alloc == cbp->cb_used) {
size_t newsz;
zfs_handle_t **newhandles;
newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
newhandles = zfs_realloc(zhp->zfs_hdl,
cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
newsz * sizeof (zfs_handle_t *));
cbp->cb_handles = newhandles;
cbp->cb_alloc = newsz;
}
cbp->cb_handles[cbp->cb_used++] = zhp;
}
/*
* Recursive helper function used during file system enumeration
*/
static int
zfs_iter_cb(zfs_handle_t *zhp, void *data)
{
get_all_cb_t *cbp = data;
if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_close(zhp);
return (0);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we cannot mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(cbp, zhp);
if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
return (0);
}
/*
* Sort comparator that compares two mountpoint paths. We sort these paths so
* that subdirectories immediately follow their parents. This means that we
* effectively treat the '/' character as the lowest value non-nul char.
* Since filesystems from non-global zones can have the same mountpoint
* as other filesystems, the comparator sorts global zone filesystems to
* the top of the list. This means that the global zone will traverse the
* filesystem list in the correct order and can stop when it sees the
* first zoned filesystem. In a non-global zone, only the delegated
* filesystems are seen.
*
* An example sorted list using this comparator would look like:
*
* /foo
* /foo/bar
* /foo/bar/baz
* /foo/baz
* /foo.bar
* /foo (NGZ1)
* /foo (NGZ2)
*
* The mounting code depends on this ordering to deterministically iterate
* over filesystems in order to spawn parallel mount tasks.
*/
static int
mountpoint_cmp(const void *arga, const void *argb)
{
zfs_handle_t *const *zap = arga;
zfs_handle_t *za = *zap;
zfs_handle_t *const *zbp = argb;
zfs_handle_t *zb = *zbp;
char mounta[MAXPATHLEN];
char mountb[MAXPATHLEN];
const char *a = mounta;
const char *b = mountb;
boolean_t gota, gotb;
uint64_t zoneda, zonedb;
zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
if (zoneda && !zonedb)
return (1);
if (!zoneda && zonedb)
return (-1);
gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
if (gota) {
verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
}
gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
if (gotb) {
verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
}
if (gota && gotb) {
while (*a != '\0' && (*a == *b)) {
a++;
b++;
}
if (*a == *b)
return (0);
if (*a == '\0')
return (-1);
if (*b == '\0')
return (1);
if (*a == '/')
return (-1);
if (*b == '/')
return (1);
return (*a < *b ? -1 : *a > *b);
}
if (gota)
return (-1);
if (gotb)
return (1);
/*
* If neither filesystem has a mountpoint, revert to sorting by
* dataset name.
*/
return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
}
/*
* Return true if path2 is a child of path1 or path2 equals path1 or
* path1 is "/" (path2 is always a child of "/").
*/
static boolean_t
libzfs_path_contains(const char *path1, const char *path2)
{
return (strcmp(path1, path2) == 0 || strcmp(path1, "/") == 0 ||
(strstr(path2, path1) == path2 && path2[strlen(path1)] == '/'));
}
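/*
 * Illustrative sketch (not part of libzfs): the containment test above is
 * purely textual, so the character after the matching prefix must be '/'.
 * A few expected results (assumes <assert.h>; example_* is hypothetical):
 */
static void
example_path_containment(void)
{
        assert(libzfs_path_contains("/", "/any/path"));
        assert(libzfs_path_contains("/a", "/a"));
        assert(libzfs_path_contains("/a", "/a/b"));
        assert(!libzfs_path_contains("/a", "/ab"));
}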
/*
* Given a mountpoint specified by idx in the handles array, find the first
* non-descendant of that mountpoint and return its index. Descendant paths
* start with the parent's path. This function relies on the ordering
* enforced by mountpoint_cmp().
*/
static int
non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
{
char parent[ZFS_MAXPROPLEN];
char child[ZFS_MAXPROPLEN];
int i;
verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
for (i = idx + 1; i < num_handles; i++) {
verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
if (!libzfs_path_contains(parent, child))
break;
}
return (i);
}
typedef struct mnt_param {
libzfs_handle_t *mnt_hdl;
tpool_t *mnt_tp;
zfs_handle_t **mnt_zhps; /* filesystems to mount */
size_t mnt_num_handles;
int mnt_idx; /* Index of selected entry to mount */
zfs_iter_f mnt_func;
void *mnt_data;
} mnt_param_t;
/*
* Allocate and populate the parameter struct for mount function, and
* schedule mounting of the entry selected by idx.
*/
static void
zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
size_t num_handles, int idx, zfs_iter_f func, void *data, tpool_t *tp)
{
mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
mnt_param->mnt_hdl = hdl;
mnt_param->mnt_tp = tp;
mnt_param->mnt_zhps = handles;
mnt_param->mnt_num_handles = num_handles;
mnt_param->mnt_idx = idx;
mnt_param->mnt_func = func;
mnt_param->mnt_data = data;
(void) tpool_dispatch(tp, zfs_mount_task, (void*)mnt_param);
}
/*
* This is the structure used to keep state of mounting or sharing operations
* during a call to zpool_enable_datasets().
*/
typedef struct mount_state {
/*
* ms_mntstatus is set to -1 if any mount fails. While multiple threads
* could update this variable concurrently, no synchronization is
* needed as it's only ever set to -1.
*/
int ms_mntstatus;
int ms_mntflags;
const char *ms_mntopts;
} mount_state_t;
static int
zfs_mount_one(zfs_handle_t *zhp, void *arg)
{
mount_state_t *ms = arg;
int ret = 0;
/*
* don't attempt to mount encrypted datasets with
* unloaded keys
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE)
return (0);
if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
ret = ms->ms_mntstatus = -1;
return (ret);
}
static int
zfs_share_one(zfs_handle_t *zhp, void *arg)
{
mount_state_t *ms = arg;
int ret = 0;
if (zfs_share(zhp, NULL) != 0)
ret = ms->ms_mntstatus = -1;
return (ret);
}
/*
* Thread pool function to mount one file system. On completion, it finds and
* schedules its children to be mounted. This depends on the sorting done in
* zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
* each descending from the previous) will have no parallelism since we always
* have to wait for the parent to finish mounting before we can schedule
* its children.
*/
static void
zfs_mount_task(void *arg)
{
mnt_param_t *mp = arg;
int idx = mp->mnt_idx;
zfs_handle_t **handles = mp->mnt_zhps;
size_t num_handles = mp->mnt_num_handles;
char mountpoint[ZFS_MAXPROPLEN];
verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
goto out;
/*
* We dispatch tasks to mount filesystems with mountpoints underneath
* this one. We do this by dispatching the next filesystem with a
* descendant mountpoint of the one we just mounted, then skip all of
* its descendants, dispatch the next descendant mountpoint, and so on.
* The non_descendant_idx() function skips over filesystems that are
* descendants of the filesystem we just dispatched.
*/
for (int i = idx + 1; i < num_handles;
i = non_descendant_idx(handles, num_handles, i)) {
char child[ZFS_MAXPROPLEN];
verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
if (!libzfs_path_contains(mountpoint, child))
break; /* not a descendant, return */
zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
mp->mnt_func, mp->mnt_data, mp->mnt_tp);
}
out:
free(mp);
}
/*
* Issue the func callback for each ZFS handle contained in the handles
* array. This function is used to mount all datasets, and so it guarantees
* that filesystems for parent mountpoints are called before their
* children. As such, before issuing any callbacks, we first sort the array
* of handles by mountpoint.
*
* Callbacks are issued in one of two ways:
*
* 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
* environment variable is set, then we issue callbacks sequentially.
*
* 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
* environment variable is not set, then we use a tpool to dispatch threads
* to mount filesystems in parallel. This function dispatches tasks to mount
* the filesystems at the top-level mountpoints, and these tasks in turn
* are responsible for recursively mounting filesystems in their children
* mountpoints.
*/
void
zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
{
zoneid_t zoneid = getzoneid();
/*
* The ZFS_SERIAL_MOUNT environment variable is an undocumented
* variable that can be used as a convenience to do a/b comparison
* of serial vs. parallel mounting.
*/
boolean_t serial_mount = !parallel ||
(getenv("ZFS_SERIAL_MOUNT") != NULL);
/*
* Sort the datasets by mountpoint. See mountpoint_cmp for details
* of how these are sorted.
*/
qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
if (serial_mount) {
for (int i = 0; i < num_handles; i++) {
func(handles[i], data);
}
return;
}
/*
* Issue the callback function for each dataset using a parallel
* algorithm that uses a thread pool to manage threads.
*/
tpool_t *tp = tpool_create(1, mount_tp_nthr, 0, NULL);
/*
* There may be multiple "top level" mountpoints outside of the pool's
* root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
* these.
*/
for (int i = 0; i < num_handles;
i = non_descendant_idx(handles, num_handles, i)) {
/*
* Since the mountpoints have been sorted so that the zoned
* filesystems are at the end, a zoned filesystem seen from
* the global zone means that we're done.
*/
if (zoneid == GLOBAL_ZONEID &&
zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
break;
zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
tp);
}
tpool_wait(tp); /* wait for all scheduled mounts to complete */
tpool_destroy(tp);
}
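/*
 * Illustrative sketch (not part of libzfs): a minimal zfs_iter_f callback
 * and how it might be handed to zfs_foreach_mountpoint().  The example_*
 * names are hypothetical; 'handles' would come from libzfs_add_handle(),
 * as in zpool_enable_datasets() below.
 */
static int
example_count_cb(zfs_handle_t *zhp, void *arg)
{
        (void) zhp;
        (*(size_t *)arg)++;
        return (0);
}

static size_t
example_count_mountpoints(libzfs_handle_t *hdl, zfs_handle_t **handles,
    size_t num_handles)
{
        size_t count = 0;

        /* B_FALSE -> serial; callbacks run in sorted mountpoint order. */
        zfs_foreach_mountpoint(hdl, handles, num_handles, example_count_cb,
            &count, B_FALSE);
        return (count);
}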
/*
* Mount and share all datasets within the given pool. This assumes that no
* datasets within the pool are currently mounted.
*/
int
zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
{
get_all_cb_t cb = { 0 };
mount_state_t ms = { 0 };
zfs_handle_t *zfsp;
int ret = 0;
if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
ZFS_TYPE_DATASET)) == NULL)
goto out;
/*
* Gather all non-snapshot datasets within the pool. Start by adding
* the root filesystem for this pool to the list, and then iterate
* over all child filesystems.
*/
libzfs_add_handle(&cb, zfsp);
if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
goto out;
/*
* Mount all filesystems
*/
ms.ms_mntopts = mntopts;
ms.ms_mntflags = flags;
zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
zfs_mount_one, &ms, B_TRUE);
if (ms.ms_mntstatus != 0)
ret = ms.ms_mntstatus;
/*
* Share all filesystems that need to be shared. This needs to be
* a separate pass because libshare is not mt-safe, and so we need
* to share serially.
*/
ms.ms_mntstatus = 0;
zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
zfs_share_one, &ms, B_FALSE);
if (ms.ms_mntstatus != 0)
ret = ms.ms_mntstatus;
else
zfs_commit_shares(NULL);
out:
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
return (ret);
}
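/*
 * Illustrative sketch (not part of libzfs): mounting and sharing everything
 * in a pool by name.  example_enable_pool() is hypothetical; NULL mount
 * options fall back to the defaults used by zfs_mount().
 */
static int
example_enable_pool(libzfs_handle_t *hdl, const char *poolname)
{
        zpool_handle_t *zhp;
        int err;

        if ((zhp = zpool_open(hdl, poolname)) == NULL)
                return (-1);
        err = zpool_enable_datasets(zhp, NULL, 0);
        zpool_close(zhp);
        return (err);
}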
struct sets_s {
char *mountpoint;
zfs_handle_t *dataset;
};
static int
mountpoint_compare(const void *a, const void *b)
{
const struct sets_s *mounta = (struct sets_s *)a;
const struct sets_s *mountb = (struct sets_s *)b;
return (strcmp(mountb->mountpoint, mounta->mountpoint));
}
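/*
 * Illustrative sketch (not part of libzfs): mountpoint_compare() sorts in
 * reverse lexical order so that child mountpoints precede their parents and
 * are unmounted first.  example_unmount_order() is hypothetical.
 */
static void
example_unmount_order(void)
{
        struct sets_s demo[] = {
                { (char *)"/a", NULL },
                { (char *)"/a/b", NULL },
                { (char *)"/c", NULL },
        };

        qsort(demo, 3, sizeof (struct sets_s), mountpoint_compare);
        /* demo[] now holds "/c", "/a/b", "/a": children before parents. */
}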
/*
* Unshare and unmount all datasets within the given pool. We don't want to
* rely on traversing the DSL to discover the filesystems within the pool,
* because this may be expensive (if not all of them are mounted), and can fail
* arbitrarily (on I/O error, for example). Instead, we walk /proc/self/mounts
* and gather all the filesystems that are currently mounted.
*/
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
int used, alloc;
FILE *mnttab;
struct mnttab entry;
size_t namelen;
struct sets_s *sets = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
int i;
int ret = -1;
int flags = (force ? MS_FORCE : 0);
namelen = strlen(zhp->zpool_name);
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
used = alloc = 0;
while (getmntent(mnttab, &entry) == 0) {
/*
* Ignore non-ZFS entries.
*/
if (entry.mnt_fstype == NULL ||
strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/*
* Ignore filesystems not within this pool.
*/
if (entry.mnt_mountp == NULL ||
strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
(entry.mnt_special[namelen] != '/' &&
entry.mnt_special[namelen] != '\0'))
continue;
/*
* At this point we've found a filesystem within our pool. Add
* it to our growing list.
*/
if (used == alloc) {
if (alloc == 0) {
sets = zfs_alloc(hdl,
8 * sizeof (struct sets_s));
alloc = 8;
} else {
sets = zfs_realloc(hdl, sets,
alloc * sizeof (struct sets_s),
alloc * 2 * sizeof (struct sets_s));
alloc *= 2;
}
}
sets[used].mountpoint = zfs_strdup(hdl, entry.mnt_mountp);
/*
* This is allowed to fail, in case there is some I/O error. It
* is only used to determine if we need to remove the underlying
* mountpoint, so failure is not fatal.
*/
sets[used].dataset = make_dataset_handle(hdl,
entry.mnt_special);
used++;
}
/*
* At this point, we have the entire list of filesystems, so sort it by
* mountpoint.
*/
if (used != 0)
qsort(sets, used, sizeof (struct sets_s), mountpoint_compare);
/*
* Walk through and first unshare everything.
*/
for (i = 0; i < used; i++) {
for (enum sa_protocol p = 0; p < SA_PROTOCOL_COUNT; ++p) {
if (sa_is_shared(sets[i].mountpoint, p) &&
unshare_one(hdl, sets[i].mountpoint,
sets[i].mountpoint, p) != 0)
goto out;
}
}
zfs_commit_shares(NULL);
/*
* Now unmount everything, removing the underlying directories as
* appropriate.
*/
for (i = 0; i < used; i++) {
if (unmount_one(sets[i].dataset, sets[i].mountpoint,
flags) != 0)
goto out;
}
for (i = 0; i < used; i++) {
if (sets[i].dataset)
remove_mountpoint(sets[i].dataset);
}
zpool_disable_datasets_os(zhp, force);
ret = 0;
out:
(void) fclose(mnttab);
for (i = 0; i < used; i++) {
if (sets[i].dataset)
zfs_close(sets[i].dataset);
free(sets[i].mountpoint);
}
free(sets);
return (ret);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
index 928f8b4287ba..eea388cf348f 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
@@ -1,5181 +1,5181 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, Klara Inc.
*/
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
int create:1; /* Validate property on creation */
int import:1; /* Validate property on import */
int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;
/*
* ====================================================================
* zpool property functions
* ====================================================================
*/
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
if (errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zcmd_free_nvlists(&zc);
return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
old_props = zhp->zpool_props;
if (zpool_get_all_props(zhp) != 0)
return (-1);
nvlist_free(old_props);
return (0);
}
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t *src)
{
nvlist_t *nv, *nvl;
const char *value;
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
if ((value = zpool_prop_default_string(prop)) == NULL)
value = "-";
}
if (src)
*src = source;
return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t value;
zprop_source_t source;
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
/*
* zpool_get_all_props() has most likely failed because
* the pool is faulted, but if all we need is the top level
* vdev's guid then get it from the zhp config nvlist.
*/
if ((prop == ZPOOL_PROP_GUID) &&
(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
== 0)) {
return (value);
}
return (zpool_prop_default_numeric(prop));
}
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
}
if (src)
*src = source;
return (value);
}
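/*
 * Illustrative sketch (not part of libzfs): numeric pool properties are read
 * with zpool_get_prop_int(), e.g. the read-only check that zfs_mount_at()
 * performs before appending MNTOPT_RO.  example_* is hypothetical.
 */
static boolean_t
example_pool_is_readonly(zpool_handle_t *zhp)
{
        return (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL) != 0);
}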
/*
* Map VDEV STATE to printed strings.
*/
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return (gettext("OFFLINE"));
case VDEV_STATE_REMOVED:
return (gettext("REMOVED"));
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return (gettext("FAULTED"));
else if (aux == VDEV_AUX_SPLIT_POOL)
return (gettext("SPLIT"));
else
return (gettext("UNAVAIL"));
case VDEV_STATE_FAULTED:
return (gettext("FAULTED"));
case VDEV_STATE_DEGRADED:
return (gettext("DEGRADED"));
case VDEV_STATE_HEALTHY:
return (gettext("ONLINE"));
default:
break;
}
return (gettext("UNKNOWN"));
}
/*
* Map POOL STATE to printed strings.
*/
const char *
zpool_pool_state_to_name(pool_state_t state)
{
switch (state) {
default:
break;
case POOL_STATE_ACTIVE:
return (gettext("ACTIVE"));
case POOL_STATE_EXPORTED:
return (gettext("EXPORTED"));
case POOL_STATE_DESTROYED:
return (gettext("DESTROYED"));
case POOL_STATE_SPARE:
return (gettext("SPARE"));
case POOL_STATE_L2CACHE:
return (gettext("L2CACHE"));
case POOL_STATE_UNINITIALIZED:
return (gettext("UNINITIALIZED"));
case POOL_STATE_UNAVAIL:
return (gettext("UNAVAIL"));
case POOL_STATE_POTENTIALLY_ACTIVE:
return (gettext("POTENTIALLY_ACTIVE"));
}
return (gettext("UNKNOWN"));
}
/*
* Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
* "SUSPENDED", etc).
*/
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
zpool_errata_t errata;
zpool_status_t status;
const char *str;
status = zpool_get_status(zhp, NULL, &errata);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
str = gettext("FAULTED");
} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
status == ZPOOL_STATUS_IO_FAILURE_MMP) {
str = gettext("SUSPENDED");
} else {
nvlist_t *nvroot = fnvlist_lookup_nvlist(
zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
uint_t vsc;
vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
}
return (str);
}
/*
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
size_t len, zprop_source_t *srctype, boolean_t literal)
{
uint64_t intval;
const char *strval;
zprop_source_t src = ZPROP_SRC_NONE;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
switch (prop) {
case ZPOOL_PROP_NAME:
(void) strlcpy(buf, zpool_get_name(zhp), len);
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_GUID:
intval = zpool_get_prop_int(zhp, prop, &src);
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
break;
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
case ZPOOL_PROP_COMPATIBILITY:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
(void) strlcpy(buf,
zpool_get_prop_string(zhp, prop, &src),
len);
break;
}
zfs_fallthrough;
default:
(void) strlcpy(buf, "-", len);
break;
}
if (srctype != NULL)
*srctype = src;
return (0);
}
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
return (-1);
switch (zpool_prop_get_type(prop)) {
case PROP_TYPE_STRING:
(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
len);
break;
case PROP_TYPE_NUMBER:
intval = zpool_get_prop_int(zhp, prop, &src);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_LEAKED:
case ZPOOL_PROP_ASHIFT:
case ZPOOL_PROP_MAXBLOCKSIZE:
case ZPOOL_PROP_MAXDNODESIZE:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
else
(void) zfs_nicenum(intval, buf, len);
break;
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicebytes(intval, buf, len);
}
break;
case ZPOOL_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_DEDUPRATIO:
if (literal)
(void) snprintf(buf, len, "%llu.%02llu",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
else
(void) snprintf(buf, len, "%llu.%02llux",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
break;
}
zfs_fallthrough;
default:
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
intval = zpool_get_prop_int(zhp, prop, &src);
if (zpool_prop_index_to_string(prop, intval, &strval)
!= 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
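/*
 * Illustrative sketch (not part of libzfs): fetching a formatted property
 * value, here the pool health string.  example_print_health() is
 * hypothetical; B_FALSE requests the human-readable (non-literal) form.
 */
static void
example_print_health(zpool_handle_t *zhp)
{
        char buf[ZFS_MAXPROPLEN];

        if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
            NULL, B_FALSE) == 0)
                (void) printf("%s: %s\n", zpool_get_name(zhp), buf);
}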
/*
* Check that the bootfs name lies within the pool it is being set on.
* Assumes bootfs is a valid dataset name.
*/
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
int len = strlen(pool);
if (bootfs[0] == '\0')
return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
if (strncmp(pool, bootfs, len) == 0 &&
(bootfs[len] == '/' || bootfs[len] == '\0'))
return (B_TRUE);
return (B_FALSE);
}
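/*
 * Illustrative sketch (not part of libzfs): expected results of the check
 * above for pool "tank" (assumes <assert.h>; example_* is hypothetical).
 * An empty bootfs clears the property and is always accepted.
 */
static void
example_bootfs_names(void)
{
        assert(bootfs_name_valid("tank", ""));
        assert(bootfs_name_valid("tank", "tank/ROOT/default"));
        assert(!bootfs_name_valid("tank", "tankers/root"));
}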
/*
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
* specified as strings.
*/
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
nvpair_t *elem;
nvlist_t *retprops;
zpool_prop_t prop;
char *strval;
uint64_t intval;
char *slash, *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
char report[1024];
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
if (flags.vdevprop && zpool_prop_vdev(propname)) {
vdev_prop_t vprop = vdev_name_to_prop(propname);
if (vdev_prop_readonly(vprop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
retprops, &strval, &intval, errbuf) != 0)
goto error;
continue;
} else if (flags.vdevprop && vdev_prop_user(propname)) {
if (nvlist_add_nvpair(retprops, elem) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
} else if (flags.vdevprop) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property: '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
prop = zpool_name_to_prop(propname);
if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
int err;
char *fname = strchr(propname, '@') + 1;
err = zfeature_lookup_name(fname, NULL);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"feature '%s' unsupported by kernel"),
fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'enabled' or 'disabled'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!flags.create &&
strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'disabled' at creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Make sure this property is valid and applies to this type.
*/
if (prop == ZPOOL_PROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zpool_prop_readonly(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (!flags.create && zpool_prop_setonce(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform additional checking for specific properties.
*/
switch (prop) {
case ZPOOL_PROP_VERSION:
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid."),
propname, (unsigned long long)intval);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
break;
case ZPOOL_PROP_ASHIFT:
if (intval != 0 &&
(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid, "
"only values between %" PRId32 " and %"
PRId32 " are allowed."),
propname, (unsigned long long)intval,
ASHIFT_MIN, ASHIFT_MAX);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_BOOTFS:
if (flags.create || flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' cannot be set at creation "
"or import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (version < SPA_VERSION_BOOTFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support "
"'%s' property"), propname);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
/*
* The bootfs property value has to be a dataset name and
* the dataset has to be in the same pool it is being set on.
*/
if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto error;
}
if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"could not open pool '%s'"), poolname);
(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
goto error;
}
zpool_close(zhp);
break;
case ZPOOL_PROP_ALTROOT:
if (!flags.create && !flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set during pool "
"creation or import"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad alternate root '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
break;
case ZPOOL_PROP_CACHEFILE:
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be empty, an "
"absolute path, or 'none'"), propname);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
slash = strrchr(strval, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*slash = '/';
break;
case ZPOOL_PROP_COMPATIBILITY:
switch (zpool_load_compat(strval, NULL, report, 1024)) {
case ZPOOL_COMPATIBILITY_OK:
case ZPOOL_COMPATIBILITY_WARNTOKEN:
break;
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
case ZPOOL_COMPATIBILITY_NOFILES:
zfs_error_aux(hdl, "%s", report);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_COMMENT:
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"comment may only have printable "
"characters"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"comment must not exceed %d characters"),
ZPROP_MAX_COMMENT);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_READONLY:
if (!flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_MULTIHOST:
if (get_system_hostid() == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"requires a non-zero system hostid"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_DEDUPDITTO:
printf("Note: property '%s' no longer has "
"any effect\n", propname);
break;
default:
break;
}
}
return (retprops);
error:
nvlist_free(retprops);
return (NULL);
}
/*
* Set zpool property: propname=propval.
*/
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
char errbuf[ERRBUFLEN];
nvlist_t *nvl = NULL;
nvlist_t *realprops;
uint64_t version;
prop_flags_t flags = { 0 };
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zpool_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_add_string(nvl, propname, propval) != 0) {
nvlist_free(nvl);
return (no_memory(zhp->zpool_hdl));
}
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
nvlist_free(nvl);
return (-1);
}
nvlist_free(nvl);
nvl = realprops;
/*
* Execute the corresponding ioctl() to set this property.
*/
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
zcmd_free_nvlists(&zc);
nvlist_free(nvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
else
(void) zpool_props_refresh(zhp);
return (ret);
}
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
zfs_type_t type, boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
nvlist_t *features = NULL;
nvpair_t *nvp;
zprop_list_t **last;
boolean_t firstexpand = (NULL == *plp);
int i;
if (zprop_expand_list(hdl, plp, type) != 0)
return (-1);
if (type == ZFS_TYPE_VDEV)
return (0);
last = plp;
while (*last != NULL)
last = &(*last)->pl_next;
if ((*plp)->pl_all)
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
for (i = 0; i < SPA_FEATURES; i++) {
zprop_list_t *entry = zfs_alloc(hdl,
sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
/* add any unsupported features */
for (nvp = nvlist_next_nvpair(features, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
char *propname;
boolean_t found;
zprop_list_t *entry;
if (zfeature_is_supported(nvpair_name(nvp)))
continue;
propname = zfs_asprintf(hdl, "unsupported@%s",
nvpair_name(nvp));
/*
* Before adding the property to the list, make sure that no
* other pool has already added the same property.
*/
found = B_FALSE;
entry = *plp;
while (entry != NULL) {
if (entry->pl_user_prop != NULL &&
strcmp(propname, entry->pl_user_prop) == 0) {
found = B_TRUE;
break;
}
entry = entry->pl_next;
}
if (found) {
free(propname);
continue;
}
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_USERPROP &&
zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
NULL, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
}
return (0);
}
int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
zprop_list_t **plp)
{
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
char *strval = NULL;
int err = 0;
nvpair_t *elem = NULL;
nvlist_t *vprops = NULL;
nvlist_t *propval = NULL;
const char *propname;
vdev_prop_t prop;
zprop_list_t **last;
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed)
continue;
if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
entry->pl_user_prop, buf, sizeof (buf), NULL,
B_FALSE) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (entry->pl_prop == VDEV_PROP_NAME &&
strlen(vdevname) > entry->pl_width)
entry->pl_width = strlen(vdevname);
}
/* Handle the all properties case */
last = plp;
if (*last != NULL && (*last)->pl_all == B_TRUE) {
while (*last != NULL)
last = &(*last)->pl_next;
err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
if (err != 0)
return (err);
while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
propname = nvpair_name(elem);
/* Skip properties that are not user defined */
if ((prop = vdev_name_to_prop(propname)) !=
VDEV_PROP_USERPROP)
continue;
if (nvpair_value_nvlist(elem, &propval) != 0)
continue;
strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
entry = zfs_alloc(zhp->zpool_hdl,
sizeof (zprop_list_t));
entry->pl_prop = prop;
entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
propname);
entry->pl_width = strlen(strval);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
return (0);
}
/*
* Get the state for the given feature on the given ZFS pool.
*/
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len)
{
uint64_t refcount;
boolean_t found = B_FALSE;
nvlist_t *features = zpool_get_features(zhp);
boolean_t supported;
const char *feature = strchr(propname, '@') + 1;
supported = zpool_prop_feature(propname);
ASSERT(supported || zpool_prop_unsupported(propname));
/*
* Convert from feature name to feature guid. This conversion is
* unnecessary for unsupported@... properties because they already
* use guids.
*/
if (supported) {
int ret;
spa_feature_t fid;
ret = zfeature_lookup_name(feature, &fid);
if (ret != 0) {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
feature = spa_feature_table[fid].fi_guid;
}
if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
found = B_TRUE;
if (supported) {
if (!found) {
(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
} else {
if (refcount == 0)
(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
else
(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
}
} else {
if (found) {
if (refcount == 0) {
(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
} else {
(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
}
} else {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
}
return (0);
}
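/*
 * Illustrative sketch (not part of libzfs): reading the state of a named
 * feature, which comes back as "disabled", "enabled" or "active".  The
 * example_* name is hypothetical; "feature@async_destroy" is a standard
 * feature property name.
 */
static void
example_print_feature_state(zpool_handle_t *zhp)
{
        char state[ZFS_MAXPROPLEN];

        if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
            sizeof (state)) == 0)
                (void) printf("async_destroy: %s\n", state);
}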
/*
* Validate the given pool name, optionally setting an extended error message
* on 'hdl'.
*/
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
namecheck_err_t why;
char what;
int ret;
ret = pool_namecheck(pool, &why, &what);
/*
* The rules for reserved pool names were extended at a later point.
* But we need to support users with existing pools that may now be
* invalid. So we only check for this expanded set of names during a
* create (or import), and only in userland.
*/
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
strncmp(pool, "draid", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
if (hdl != NULL)
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is reserved"));
return (B_FALSE);
}
if (ret != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is too long"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in pool name"), what);
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name must begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool name is reserved"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NO_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"permission set is missing '@'"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (B_FALSE);
}
return (B_TRUE);
}
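/*
 * Illustrative sketch (not part of libzfs): validating a candidate pool name
 * before creation.  Passing isopen = B_FALSE applies the stricter
 * creation-time rules, so names beginning with "mirror", "raidz", "draid"
 * or "spare", or equal to "log", are rejected.  example_* is hypothetical.
 */
static boolean_t
example_check_new_pool_name(libzfs_handle_t *hdl, const char *name)
{
        return (zpool_name_valid(hdl, B_FALSE, name));
}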
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
*/
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
boolean_t missing;
/*
* Make sure the pool name is valid.
*/
if (!zpool_name_valid(hdl, B_TRUE, pool)) {
(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
pool);
return (NULL);
}
zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (NULL);
}
if (missing) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
*/
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
zpool_handle_t *zhp;
boolean_t missing;
zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (-1);
}
if (missing) {
zpool_close(zhp);
*ret = NULL;
return (0);
}
*ret = zhp;
return (0);
}
/*
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
* state.
*/
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
return (NULL);
if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Close the handle. Simply frees the memory associated with the handle.
*/
void
zpool_close(zpool_handle_t *zhp)
{
nvlist_free(zhp->zpool_config);
nvlist_free(zhp->zpool_old_config);
nvlist_free(zhp->zpool_props);
free(zhp);
}
/*
* Return the name of the pool.
*/
const char *
zpool_get_name(zpool_handle_t *zhp)
{
return (zhp->zpool_name);
}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE)
*/
int
zpool_get_state(zpool_handle_t *zhp)
{
return (zhp->zpool_state);
}
/*
* Check if vdev list contains a special vdev
*/
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
for (uint_t c = 0; c < children; c++) {
char *bias;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Check if vdev list contains a dRAID vdev
*/
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (uint_t c = 0; c < children; c++) {
char *type;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type) == 0 &&
strcmp(type, VDEV_TYPE_DRAID) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Output a dRAID top-level vdev name into the provided buffer.
*/
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
uint64_t spares, uint64_t children)
{
snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
(u_longlong_t)children, (u_longlong_t)spares);
return (name);
}
/*
* Return B_TRUE if the provided name is a dRAID spare name.
*/
boolean_t
zpool_is_draid_spare(const char *name)
{
uint64_t spare_id, parity, vdev_id;
if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
(u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
(u_longlong_t *)&spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
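/*
 * Illustrative note (example values only): zpool_draid_name() above formats
 * a top-level dRAID name as "draid<parity>:<data>d:<children>c:<spares>s",
 * while zpool_is_draid_spare() matches distributed spare names of the form
 * "draid<parity>-<top-level vdev id>-<spare id>".
 *
 *	char buf[64];
 *	(void) zpool_draid_name(buf, sizeof (buf), 4, 1, 1, 9);
 *	    buf now holds "draid1:4d:9c:1s"
 *	zpool_is_draid_spare("draid1-0-0")    returns B_TRUE
 *	zpool_is_draid_spare("draid1:4d:9c:1s")    returns B_FALSE
 */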
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
*/
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
nvlist_t *props, nvlist_t *fsprops)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zc_fsprops = NULL;
nvlist_t *zc_props = NULL;
nvlist_t *hidden_args = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[ERRBUFLEN];
int ret = -1;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
if (props) {
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
SPA_VERSION_1, flags, errbuf)) == NULL) {
goto create_failed;
}
}
if (fsprops) {
uint64_t zoned;
char *zonestr;
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
goto create_failed;
}
if (nvlist_exists(zc_fsprops,
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
!zpool_has_special_vdev(nvroot)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s property requires a special vdev"),
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto create_failed;
}
if (!zc_props &&
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
goto create_failed;
}
if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
&wkeydata, &wkeylen) != 0) {
zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto create_failed;
}
if (nvlist_add_nvlist(zc_props,
ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
goto create_failed;
}
if (wkeydata != NULL) {
if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
goto create_failed;
if (nvlist_add_uint8_array(hidden_args, "wkeydata",
wkeydata, wkeylen) != 0)
goto create_failed;
if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
hidden_args) != 0)
goto create_failed;
}
}
if (zc_props)
zcmd_write_src_nvlist(hdl, &zc, zc_props);
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label. This can also happen if the device is
* part of an active md or lvm device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ERANGE:
/*
* This happens if the record size is outside the allowed
* size range, or is not a power of 2.
*
* NOTE: although zfs_valid_proplist is called earlier,
* this case may have slipped through since the
* pool does not exist yet and it is therefore
* impossible to read properties e.g. max blocksize
* from the pool.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"record size invalid"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is less than the "
"minimum size (%s)"), buf);
}
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ENOSPC:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
} else {
return (zpool_standard_error(hdl, errno,
errbuf));
}
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
create_failed:
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
}
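/*
 * Illustrative sketch (not part of the original source): a minimal nvroot
 * for zpool_create() is a root vdev with one child. The file path, pool
 * name, and 'hdl' (a handle from libzfs_init()) are example assumptions,
 * and error handling is abbreviated; zpool(8) builds nvroot with
 * considerably more validation before calling this function.
 *
 *	nvlist_t *child = fnvlist_alloc();
 *	fnvlist_add_string(child, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE);
 *	fnvlist_add_string(child, ZPOOL_CONFIG_PATH, "/tmp/zpool.img");
 *
 *	nvlist_t *nvroot = fnvlist_alloc();
 *	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    (const nvlist_t **)&child, 1);
 *
 *	int err = zpool_create(hdl, "tank", nvroot, NULL, NULL);
 *	fnvlist_free(nvroot);
 *	fnvlist_free(child);
 */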
/*
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
*/
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
} else {
(void) zpool_standard_error(hdl, errno, errbuf);
}
if (zfp)
zfs_close(zfp);
return (-1);
}
if (zfp) {
remove_mountpoint(zfp);
zfs_close(zfp);
}
return (0);
}
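/*
 * Illustrative sketch (example values, abbreviated error handling):
 * destroying a pool by handle. The log string is recorded with the
 * operation; the text used here is an example.
 *
 *	zpool_handle_t *zhp = zpool_open_canfail(hdl, "tank");
 *	if (zhp != NULL) {
 *		if (zpool_destroy(zhp, "destroy tank (example)") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(zhp);
 *	}
 */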
/*
* Create a checkpoint in the given pool.
*/
int
zpool_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
int error;
error = lzc_pool_checkpoint(zhp->zpool_name);
if (error != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot checkpoint '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, errbuf);
return (-1);
}
return (0);
}
/*
* Discard the checkpoint from the given pool.
*/
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
int error;
error = lzc_pool_checkpoint_discard(zhp->zpool_name);
if (error != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot discard checkpoint in '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, errbuf);
return (-1);
}
return (0);
}
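/*
 * Illustrative sketch: taking a checkpoint before a risky change and
 * discarding it afterwards. Error handling is abbreviated.
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		... perform changes that may need to be rewound ...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}
 */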
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
*/
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
zfs_cmd_t zc = {"\0"};
int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_L2CACHE &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; a pool with removing/"
"removed vdevs does not support adding "
"raidz or dRAID vdevs"));
}
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is less than the minimum "
"size (%s)"), buf);
}
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
ret = -1;
} else {
ret = 0;
}
zcmd_free_nvlists(&zc);
return (ret);
}
/*
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
*/
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
const char *log_str)
{
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
zc.zc_guid = hardforce;
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"use '-f' to override the following errors:\n"
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
default:
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
}
}
return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
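/*
 * Illustrative sketch: a normal export, falling back to a hard-forced
 * export only if that is acceptable for the caller. The history strings
 * are examples.
 *
 *	if (zpool_export(zhp, B_FALSE, "export tank (example)") != 0) {
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 *		(void) zpool_export_force(zhp, "forced export (example)");
 *	}
 */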
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
nvlist_t *config)
{
nvlist_t *nv = NULL;
uint64_t rewindto;
int64_t loss = -1;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr || config == NULL)
return;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
return;
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
return;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
if (dryrun) {
(void) printf(dgettext(TEXT_DOMAIN,
"Would be able to return %s "
"to its state as of %s.\n"),
name, timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"Pool %s returned to its state as of %s.\n"),
name, timestr);
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
((longlong_t)loss + 30) / 60);
(void) printf(dgettext(TEXT_DOMAIN,
"minutes of transactions.\n"));
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
(longlong_t)loss);
(void) printf(dgettext(TEXT_DOMAIN,
"seconds of transactions.\n"));
}
}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *config)
{
nvlist_t *nv = NULL;
int64_t loss = -1;
uint64_t edata = UINT64_MAX;
uint64_t rewindto;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr)
return;
if (reason >= 0)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
else
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
goto no_info;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
&edata);
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "),
((longlong_t)loss + 30) / 60);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "),
(longlong_t)loss);
}
if (edata != 0 && edata != UINT64_MAX) {
if (edata == 1) {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
}
}
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) printf(dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
return;
no_info:
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
* zpool_import() is a contracted interface. It should be kept the same
* if possible.
*
* Applications should use zpool_import_props() to import a pool with
* new property values to be set.
*/
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
char *altroot)
{
nvlist_t *props = NULL;
int ret;
if (altroot != NULL) {
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
if (nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
nvlist_free(props);
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
}
ret = zpool_import_props(hdl, config, newname, props,
ZFS_IMPORT_NORMAL);
nvlist_free(props);
return (ret);
}
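/*
 * Illustrative sketch: importing a discovered pool under an alternate
 * root. 'config' is assumed to come from the import search API (e.g.
 * zpool_find_import()), as described for zpool_import_props() below; the
 * altroot path is an example.
 *
 *	char altroot[] = "/mnt/recovery";
 *	if (zpool_import(hdl, config, NULL, altroot) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */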
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
int indent)
{
nvlist_t **child;
uint_t c, children;
char *vname;
uint64_t is_log = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
&is_log);
if (name != NULL)
(void) printf("\t%*s%s%s\n", indent, "", name,
is_log ? " [log]" : "");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
print_vdev_tree(hdl, vname, child[c], indent + 2);
free(vname);
}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
nvlist_t *nvinfo, *unsup_feat;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
char *desc = fnvpair_value_string(nvp);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
(void) printf("\t%s\n", nvpair_name(nvp));
}
}
/*
* Import the given pool using the known configuration and a list of
* properties to be set. The configuration should have come from
* zpool_find_import(). The 'newname' parameter controls whether the pool
* is imported with a different name.
*/
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
zpool_load_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
const char *thename;
char *origname;
int ret;
int error = 0;
char errbuf[ERRBUFLEN];
origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
thename = newname;
} else {
thename = origname;
}
if (props != NULL) {
uint64_t version;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL)
return (-1);
zcmd_write_src_nvlist(hdl, &zc, props);
nvlist_free(props);
}
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
zcmd_write_conf_nvlist(hdl, &zc, config);
zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (ret != 0)
error = errno;
(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
zcmd_free_nvlists(&zc);
zpool_get_load_policy(config, &policy);
if (error) {
char desc[1024];
char aux[256];
/*
* Dry-run failed, but we print out what success
* looks like if we found a best txg
*/
if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
return (-1);
}
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
thename);
else
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
origname, thename);
switch (error) {
case ENOTSUP:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
"All unsupported features are only "
"required for writing to the pool."
"\nThe pool can be imported using "
"'-o readonly=on'.\n"));
}
}
/*
* Unsupported version.
*/
(void) zfs_error(hdl, EZFS_BADVERSION, desc);
break;
case EREMOTEIO:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
mmp_state_t mmp_state;
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
if (mmp_state == MMP_STATE_ACTIVE) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool is imp"
"orted on host '%s' (hostid=%lx).\n"
"Export the pool on the other "
"system, then run 'zpool import'."),
hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool has "
"the multihost property on and "
"the\nsystem's hostid is not set. "
"Set a unique system hostid with "
"the zgenhostid(8) command.\n"));
}
(void) zfs_error_aux(hdl, "%s", aux);
}
(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
break;
case EROFS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENXIO:
if (nv && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"The devices below are missing or "
"corrupted, use '-m' to import the pool "
"anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
}
(void) zpool_standard_error(hdl, error, desc);
break;
case EEXIST:
(void) zpool_standard_error(hdl, error, desc);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices are already in use\n"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENAMETOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new name of at least one dataset is longer than "
"the maximum allowable length"));
(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
break;
default:
(void) zpool_standard_error(hdl, error, desc);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
break;
}
nvlist_free(nv);
ret = -1;
} else {
zpool_handle_t *zhp;
/*
* This should never fail, but play it safe anyway.
*/
if (zpool_open_silent(hdl, thename, &zhp) != 0)
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
return (0);
}
return (ret);
}
/*
* Translate vdev names to guids. If a vdev_path is determined to be
* unsuitable then a vd_errlist is allocated and the vdev path and errno
* are added to it.
*/
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
nvlist_t *errlist = NULL;
int error = 0;
for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
elem = nvlist_next_nvpair(vds, elem)) {
boolean_t spare, cache;
char *vd_path = nvpair_name(elem);
nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
NULL);
if ((tgt == NULL) || cache || spare) {
if (errlist == NULL) {
errlist = fnvlist_alloc();
error = EINVAL;
}
uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
(spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
fnvlist_add_int64(errlist, vd_path, err);
continue;
}
uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
fnvlist_add_uint64(vdev_guids, vd_path, guid);
char msg[MAXNAMELEN];
(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
fnvlist_add_string(guids_to_paths, msg, vd_path);
}
if (error != 0) {
verify(errlist != NULL);
if (vd_errlist != NULL)
*vd_errlist = errlist;
else
fnvlist_free(errlist);
}
return (error);
}
static int
xlate_init_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_INITIALIZING);
case ESRCH:
return (EZFS_NO_INITIALIZE);
}
return (err);
}
/*
* Begin, suspend, or cancel the initialization (initializing of all free
* blocks) for the given vdevs in the given pool.
*/
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds, boolean_t wait)
{
int err;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *vd_errlist = NULL;
nvlist_t *errlist;
nvpair_t *elem;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &vd_errlist);
if (err != 0) {
verify(vd_errlist != NULL);
goto list_errors;
}
err = lzc_initialize(zhp->zpool_name, cmd_type,
vdev_guids, &errlist);
if (err != 0) {
if (errlist != NULL) {
vd_errlist = fnvlist_lookup_nvlist(errlist,
ZPOOL_INITIALIZE_VDEVS);
goto list_errors;
}
(void) zpool_standard_error(zhp->zpool_hdl, err,
dgettext(TEXT_DOMAIN, "operation failed"));
goto out;
}
if (wait) {
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_INITIALIZE, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting for '%s' to initialize"),
nvpair_name(elem));
goto out;
}
}
}
goto out;
list_errors:
for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
elem = nvlist_next_nvpair(vd_errlist, elem)) {
int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
char *path;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot initialize '%s'", path);
}
out:
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
if (vd_errlist != NULL)
fnvlist_free(vd_errlist);
return (err == 0 ? 0 : -1);
}
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}
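/*
 * Illustrative sketch: starting initialization on one leaf vdev and
 * waiting for it to finish. The device path is an example; 'vds' maps
 * vdev names to placeholder values and only the names are used.
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/da1");
 *	int err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
 *	fnvlist_free(vds);
 */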
static int
xlate_trim_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_TRIMMING);
case ESRCH:
return (EZFS_NO_TRIM);
case EOPNOTSUPP:
return (EZFS_TRIM_NOTSUP);
}
return (err);
}
static int
zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
{
int err;
nvpair_t *elem;
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_TRIM, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting to trim '%s'"), nvpair_name(elem));
return (err);
}
}
return (0);
}
/*
* Check errlist and report any errors, omitting ones which should be
* suppressed. Returns B_TRUE if any errors were reported.
*/
static boolean_t
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
{
nvpair_t *elem;
boolean_t reported_errs = B_FALSE;
int num_vds = 0;
int num_suppressed_errs = 0;
for (elem = nvlist_next_nvpair(vds, NULL);
elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
num_vds++;
}
for (elem = nvlist_next_nvpair(errlist, NULL);
elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
char *path;
/*
* If only the pool was specified, and it was not a secure
* trim, then suppress warnings for individual vdevs which
* do not support trimming.
*/
if (vd_error == EZFS_TRIM_NOTSUP &&
trim_flags->fullpool &&
!trim_flags->secure) {
num_suppressed_errs++;
continue;
}
reported_errs = B_TRUE;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot trim '%s'", path);
}
if (num_suppressed_errs == num_vds) {
(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"no devices in pool support trim operations"));
(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
dgettext(TEXT_DOMAIN, "cannot trim")));
reported_errs = B_TRUE;
}
return (reported_errs);
}
/*
* Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
* the given vdevs in the given pool.
*/
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
trimflags_t *trim_flags)
{
int err;
int retval = 0;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *errlist = NULL;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &errlist);
if (err != 0) {
check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
retval = -1;
goto out;
}
err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
trim_flags->secure, vdev_guids, &errlist);
if (err != 0) {
nvlist_t *vd_errlist;
if (errlist != NULL && nvlist_lookup_nvlist(errlist,
ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
if (check_trim_errs(zhp, trim_flags, guids_to_paths,
vds, vd_errlist)) {
retval = -1;
goto out;
}
} else {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "operation failed"));
zpool_standard_error(zhp->zpool_hdl, err, errbuf);
retval = -1;
goto out;
}
}
if (trim_flags->wait)
retval = zpool_trim_wait(zhp, vdev_guids);
out:
if (errlist != NULL)
fnvlist_free(errlist);
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
return (retval);
}
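/*
 * Illustrative sketch: issuing a TRIM on a single named device and
 * blocking until it completes. The field names follow the trimflags_t
 * members used above; the device path and values are examples.
 *
 *	trimflags_t tf = { 0 };
 *	tf.fullpool = B_FALSE;	(a specific device is named below)
 *	tf.secure = B_FALSE;
 *	tf.wait = B_TRUE;	(block until the TRIM completes)
 *	tf.rate = 0;		(no rate limit)
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	fnvlist_add_boolean(vds, "/dev/da1");
 *	int err = zpool_trim(zhp, POOL_TRIM_START, vds, &tf);
 *	fnvlist_free(vds);
 */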
/*
* Scan the pool.
*/
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
int err;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = func;
zc.zc_flags = cmd;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
return (0);
err = errno;
/* ECANCELED on a scrub means we resumed a paused scrub */
if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
cmd == POOL_SCRUB_NORMAL)
return (0);
if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
return (0);
if (func == POOL_SCAN_SCRUB) {
if (cmd == POOL_SCRUB_PAUSE) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
zc.zc_name);
} else {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot scrub %s"),
zc.zc_name);
}
} else if (func == POOL_SCAN_RESILVER) {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot restart resilver on %s"), zc.zc_name);
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot cancel scrubbing %s"), zc.zc_name);
} else {
assert(!"unexpected result");
}
if (err == EBUSY) {
nvlist_t *nvroot;
pool_scan_stat_t *ps = NULL;
uint_t psc;
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
ps->pss_state == DSS_SCANNING) {
if (cmd == POOL_SCRUB_PAUSE)
return (zfs_error(hdl, EZFS_SCRUB_PAUSED,
errbuf));
else
return (zfs_error(hdl, EZFS_SCRUBBING, errbuf));
} else {
return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
}
} else if (err == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
} else {
return (zpool_standard_error(hdl, err, errbuf));
}
}
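/*
 * Illustrative sketch: the function/command pairs handled above map to
 * starting, pausing, and cancelling a scrub.
 *
 *	start:  (void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	pause:  (void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	cancel: (void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 */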
/*
* Find a vdev that matches the specified search criteria. We use the
* nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare, and FALSE if it is an INUSE spare.
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
uint_t c, children;
nvlist_t **child;
nvlist_t *ret;
uint64_t is_log;
char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
if (search == NULL || pair == NULL)
return (NULL);
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
uint64_t srchval = fnvpair_value_uint64(pair);
uint64_t theguid = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_GUID);
if (theguid == srchval)
return (nv);
}
break;
case DATA_TYPE_STRING: {
char *srchval, *val;
srchval = fnvpair_value_string(pair);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
break;
/*
* Search for the requested value. Special cases:
*
* - ZPOOL_CONFIG_PATH for whole disk entries. These end in
* "-part1", or "p1". The suffix is hidden from the user,
* but included in the string, so this matches around it.
* - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
* is used to check all possible expanded paths.
* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
*
* Otherwise, all other searches are simple string compares.
*/
if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
return (nv);
} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
/*
* Determine our vdev type, keeping in mind
* that the srchval is composed of a type and
* vdev id pair (e.g. mirror-4).
*/
if ((type = strdup(srchval)) == NULL)
return (NULL);
if ((p = strrchr(type, '-')) == NULL) {
free(type);
break;
}
idx = p + 1;
*p = '\0';
/*
* If the types don't match then keep looking.
*/
if (strncmp(val, type, strlen(val)) != 0) {
free(type);
break;
}
verify(zpool_vdev_is_interior(type));
id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
errno = 0;
vdev_id = strtoull(idx, &end, 10);
/*
* If we are looking for a raidz and a parity is
* specified, make sure it matches.
*/
int rzlen = strlen(VDEV_TYPE_RAIDZ);
assert(rzlen == strlen(VDEV_TYPE_DRAID));
int typlen = strlen(type);
if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
typlen != rzlen) {
uint64_t vdev_parity;
int parity = *(type + rzlen) - '0';
if (parity <= 0 || parity > 3 ||
(typlen - rzlen) != 1) {
/*
* Nonsense parity specified, can
* never match
*/
free(type);
return (NULL);
}
vdev_parity = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY);
if ((int)vdev_parity != parity) {
free(type);
break;
}
}
free(type);
if (errno != 0)
return (NULL);
/*
* Now verify that we have the correct vdev id.
*/
if (vdev_id == id)
return (nv);
}
/*
* Common case
*/
if (strcmp(srchval, val) == 0)
return (nv);
break;
}
default:
break;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
/*
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs. So we always lookup the
* log device from the root of the vdev tree (where
* 'log' is non-NULL).
*/
if (log != NULL &&
nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
is_log) {
*log = B_TRUE;
}
return (ret);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
return (ret);
}
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*l2cache = B_TRUE;
return (ret);
}
}
}
return (NULL);
}
/*
* Given a physical path or guid, find the associated vdev.
*/
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
nvlist_t *search, *nvroot, *ret;
uint64_t guid;
char *end;
search = fnvlist_alloc();
guid = strtoull(ppath, &end, 0);
if (guid != 0 && *end == '\0') {
fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else {
fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
}
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
fnvlist_free(search);
return (ret);
}
/*
* Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
*/
static boolean_t
zpool_vdev_is_interior(const char *name)
{
if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE);
if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
!zpool_is_draid_spare(name))
return (B_TRUE);
return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
char *end;
nvlist_t *nvroot, *search, *ret;
uint64_t guid;
search = fnvlist_alloc();
guid = strtoull(path, &end, 0);
if (guid != 0 && *end == '\0') {
fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else if (zpool_vdev_is_interior(path)) {
fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
} else {
fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
}
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
fnvlist_free(search);
return (ret);
}
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
* If is_spare, is_l2cache, or is_log is non-NULL, then store within it
* whether the VDEV is a spare, l2cache, or log device. If they're NULL then
* ignore them.
*/
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
nvlist_t *tgt;
if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
&log)) == NULL)
return (0);
if (is_spare != NULL)
*is_spare = spare;
if (is_l2cache != NULL)
*is_l2cache = l2cache;
if (is_log != NULL)
*is_log = log;
return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
}
/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
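/*
 * Illustrative sketch: resolving a device either to its config nvlist or
 * directly to a GUID. The device path is an example.
 *
 *	boolean_t spare, l2cache, islog;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "/dev/da1", &spare, &l2cache,
 *	    &islog);
 *	if (tgt != NULL && !spare && !l2cache) {
 *		uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/da1");
 *		... guid is 0 only if the lookup failed ...
 *	}
 */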
/*
* Bring the specified vdev online. The 'flags' parameter is a set of the
* ZFS_ONLINE_* flags.
*/
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
vdev_state_t *newstate)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (flags & ZFS_ONLINE_EXPAND) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
#ifndef __FreeBSD__
char *pathname;
if ((flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
/*
* XXX - L2ARC 1.0 devices can't support expansion.
*/
if (l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
}
if (wholedisk) {
const char *fullpath = path;
char buf[MAXPATHLEN];
int error;
if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
errbuf));
fullpath = buf;
}
error = zpool_relabel_disk(hdl, fullpath, errbuf);
if (error != 0)
return (error);
}
}
#endif
zc.zc_cookie = VDEV_STATE_ONLINE;
zc.zc_obj = flags;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
if (errno == EINVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
}
return (zpool_standard_error(hdl, errno, errbuf));
}
*newstate = zc.zc_cookie;
return (0);
}
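/*
 * Illustrative sketch: onlining a device and requesting expansion. The
 * device path is an example.
 *
 *	vdev_state_t newstate;
 *	if (zpool_vdev_online(zhp, "/dev/da1", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate != VDEV_STATE_HEALTHY)
 *		(void) fprintf(stderr, "onlined, but not healthy\n");
 */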
/*
* Take the specified vdev offline
*/
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
case EEXIST:
/*
* The log device has unplayed logs
*/
return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
/*
* Mark the given vdev faulted.
*/
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_FAULTED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
/*
* Mark the given vdev degraded.
*/
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_DEGRADED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Returns TRUE if the given nvlist is a vdev that was originally swapped in as
* a hot spare.
*/
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
char *type = fnvlist_lookup_string(search, ZPOOL_CONFIG_TYPE);
if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
children == 2 && child[which] == tgt)
return (B_TRUE);
for (c = 0; c < children; c++)
if (is_replacing_spare(child[c], tgt, which))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Attach new_disk (fully described by nvroot) to old_disk.
* If 'replacing' is specified, the new disk will replace the old one.
*/
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
int ret;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
uint64_t val;
char *newname;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (replacing)
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot replace %s with %s"), old_disk, new_disk);
else
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
zc.zc_cookie = replacing;
zc.zc_simple = rebuild;
if (rebuild &&
zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"the loaded zfs module doesn't support device rebuilds"));
return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children != 1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
}
config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1);
/*
* If the target is a hot spare that has been swapped in, we can only
* replace it with another hot spare.
*/
if (replacing &&
nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
NULL) == NULL || !avail_spare) &&
is_replacing_spare(config_root, tgt, 1)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
}
free(newname);
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
if (ret == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't attach to or replace this type of vdev.
*/
if (replacing) {
uint64_t version = zpool_get_prop_int(zhp,
ZPOOL_PROP_VERSION, NULL);
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a log with a spare"));
} else if (rebuild) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"only mirror and dRAID vdevs support "
"sequential reconstruction"));
} else if (zpool_is_draid_spare(new_disk)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares can only replace child "
"devices in their parent's dRAID vdev"));
} else if (version >= SPA_VERSION_MULTI_REPLACE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"already in replacing/spare config; wait "
"for completion or use 'zpool detach'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a replacing device"));
}
} else {
char status[64] = {0};
zpool_prop_get_feature(zhp,
"feature@device_rebuild", status, 63);
if (rebuild &&
strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device_rebuild feature must be enabled "
"in order to use sequential "
"reconstruction"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only attach to mirrors and top-level "
"disks"));
}
}
(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EINVAL:
/*
* The new device must be a single disk.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
"or device removal is in progress"),
new_disk);
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
* The new device is too small.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is too small"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EDOM:
/*
* The new device has a different optimal sector size.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device has a different optimal sector size; use the "
"option '-o ashift=N' to override the optimal size"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
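/*
 * Illustrative sketch (example paths, abbreviated error handling): replacing
 * one disk with another. The nvroot describing the new device is built the
 * same way as for zpool_create(); zpool(8) performs additional labeling and
 * validation that is omitted here.
 *
 *	nvlist_t *newdev = fnvlist_alloc();
 *	fnvlist_add_string(newdev, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	fnvlist_add_string(newdev, ZPOOL_CONFIG_PATH, "/dev/da2");
 *
 *	nvlist_t *nvroot = fnvlist_alloc();
 *	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    (const nvlist_t **)&newdev, 1);
 *
 *	replacing = B_TRUE, rebuild (sequential reconstruction) = B_FALSE:
 *	int err = zpool_vdev_attach(zhp, "/dev/da1", "/dev/da2", nvroot,
 *	    B_TRUE, B_FALSE);
 *	fnvlist_free(nvroot);
 *	fnvlist_free(newdev);
 */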
/*
* Detach the specified device.
*/
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't detach from this type of vdev.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
"applicable to mirror and replacing vdevs"));
(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
/*
* Find a mirror vdev in the source nvlist.
*
* The mchild array contains a list of disks in one of the top-level mirrors
* of the source pool. The schild array contains a list of disks that the
* user specified on the command line. We loop over the mchild array to
* see if any entry in the schild array matches.
*
* If a disk in the mchild array is found in the schild array, we return
* the index of that entry. Otherwise we return -1.
*/
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
nvlist_t **schild, uint_t schildren)
{
uint_t mc;
for (mc = 0; mc < mchildren; mc++) {
uint_t sc;
char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
mchild[mc], 0);
for (sc = 0; sc < schildren; sc++) {
char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
schild[sc], 0);
boolean_t result = (strcmp(mpath, spath) == 0);
free(spath);
if (result) {
free(mpath);
return (mc);
}
}
free(mpath);
}
return (-1);
}
/*
* Split a mirror pool. If newroot points to NULL, then a new nvlist
* is generated and it is the responsibility of the caller to free it.
*/
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
nvlist_t *props, splitflags_t flags)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN], *bias;
nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
nvlist_t **varray = NULL, *zc_props = NULL;
uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t vers, readonly = B_FALSE;
boolean_t freelist = B_FALSE, memory_err = B_TRUE;
int retval = 0;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("Internal error: unable to "
"retrieve pool configuration\n"));
return (-1);
}
tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (props) {
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
props, vers, flags, errbuf)) == NULL)
return (-1);
(void) nvlist_lookup_uint64(zc_props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property %s can only be set at import time"),
zpool_prop_to_name(ZPOOL_PROP_READONLY));
return (-1);
}
}
if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool is missing vdev tree"));
nvlist_free(zc_props);
return (-1);
}
varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
vcount = 0;
if (*newroot == NULL ||
nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
&newchild, &newchildren) != 0)
newchildren = 0;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
char *type;
nvlist_t **mchild, *vdev;
uint_t mchildren;
int entry;
/*
* Unlike cache & spares, slogs are stored in the
* ZPOOL_CONFIG_CHILDREN array. We filter them out here.
*/
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_log || is_hole) {
/*
* Create a hole vdev and put it in the config.
*/
if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0)
goto out;
if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
1) != 0)
goto out;
if (lastlog == 0)
lastlog = vcount;
varray[vcount++] = vdev;
continue;
}
lastlog = 0;
type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
vdev = child[c];
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
continue;
} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool must be composed only of mirrors\n"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
is_special = B_TRUE;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
is_dedup = B_TRUE;
}
verify(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
/* find or add an entry for this top-level vdev */
if (newchildren > 0 &&
(entry = find_vdev_entry(zhp, mchild, mchildren,
newchild, newchildren)) >= 0) {
/* We found a disk that the user specified. */
vdev = mchild[entry];
++found;
} else {
/* User didn't specify a disk for this vdev. */
vdev = mchild[mchildren - 1];
}
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
if (flags.dryrun != 0) {
if (is_dedup == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) != 0)
goto out;
} else if (is_special == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) != 0)
goto out;
}
}
}
/* did we find every disk the user specified? */
if (found != newchildren) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
"include at most one disk from each mirror"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
/* Prepare the nvlist for populating. */
if (*newroot == NULL) {
if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
goto out;
freelist = B_TRUE;
if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0)
goto out;
} else {
verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
}
/* Add all the children we found */
if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
goto out;
/*
* If we're just doing a dry run, exit now with success.
*/
if (flags.dryrun) {
memory_err = B_FALSE;
freelist = B_FALSE;
goto out;
}
/* now build up the config list & call the ioctl */
if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_nvlist(newconfig,
ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
nvlist_add_string(newconfig,
ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
goto out;
/*
* The new pool is automatically part of the namespace unless we
* explicitly export it.
*/
if (!flags.import)
zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
zcmd_write_conf_nvlist(hdl, &zc, newconfig);
if (zc_props != NULL)
zcmd_write_src_nvlist(hdl, &zc, zc_props);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
retval = zpool_standard_error(hdl, errno, errbuf);
goto out;
}
freelist = B_FALSE;
memory_err = B_FALSE;
out:
if (varray != NULL) {
int v;
for (v = 0; v < vcount; v++)
nvlist_free(varray[v]);
free(varray);
}
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(newconfig);
if (freelist) {
nvlist_free(*newroot);
*newroot = NULL;
}
if (retval != 0)
return (retval);
if (memory_err)
return (no_memory(hdl));
return (0);
}
/*
* Remove the given device.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t version;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
if (zpool_is_draid_spare(path)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares cannot be removed"));
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (islog && version < SPA_VERSION_HOLES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support log removal"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
switch (errno) {
case EINVAL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; all top-level vdevs must "
"have the same sector size and not be raidz."));
(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Pool busy; removal may already be in progress"));
}
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
case EACCES:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
} else {
(void) zpool_standard_error(hdl, errno, errbuf);
}
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {{0}};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot cancel removal"));
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = 1;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
uint64_t *sizep)
{
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
path);
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare || l2cache || islog) {
*sizep = 0;
return (0);
}
if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"indirect size not available"));
return (zfs_error(hdl, EINVAL, errbuf));
}
return (0);
}
/*
* Clear the errors for the pool, or the particular device if specified.
*/
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
zpool_load_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
int error;
if (path)
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
path);
else
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
&l2cache, NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
/*
* Don't allow error clearing for hot spares. Do allow
* error clearing for l2cache devices.
*/
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
}
zpool_get_load_policy(rewindnvl, &policy);
zc.zc_cookie = policy.zlp_rewind;
zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
zcmd_write_src_nvlist(hdl, &zc, rewindnvl);
while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
zcmd_free_nvlists(&zc);
return (0);
}
zcmd_free_nvlists(&zc);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Similar to zpool_clear(), but takes a GUID (used by fmd).
*/
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
(u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = ZPOOL_NO_REWIND;
if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Change the GUID for a pool.
*/
int
zpool_reguid(zpool_handle_t *zhp)
{
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_cmd_t zc = {"\0"};
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Reopen the pool.
*/
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *scrub_restart = data;
int error;
error = lzc_reopen(pool_name, *scrub_restart);
if (error) {
return (zpool_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
}
return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
int ret;
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *force = data;
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_boolean_value(innvl, "force", *force);
if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
nvlist_free(innvl);
return (zpool_standard_error_fmt(hdl, ret,
dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
}
nvlist_free(innvl);
return (0);
}
#define PATH_BUF_LEN 64
/*
* Given a vdev, return the name to display in iostat. If the vdev has a path,
* we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
* We also check if this is a whole disk, in which case we strip off the
* trailing 's0' slice name.
*
* This routine is also responsible for identifying when disks have been
* reconfigured in a new location. The kernel will have opened the device by
* devid, but the path will still refer to the old location. To catch this, we
* first do a path -> devid translation (which is fast for the common case). If
* the devid matches, we're done. If not, we do a reverse devid -> path
* translation and issue the appropriate ioctl() to update the path of the vdev.
* If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
* of these checks.
*/
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
int name_flags)
{
char *type, *tpath;
const char *path;
uint64_t value;
char buf[PATH_BUF_LEN];
char tmpbuf[PATH_BUF_LEN * 2];
/*
* vdev_name will be "root"/"root-0" for the root vdev, but it is the
* zpool name that will be displayed to the user.
*/
type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
return (zfs_strdup(hdl, zpool_get_name(zhp)));
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
name_flags |= VDEV_NAME_PATH;
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
name_flags |= VDEV_NAME_GUID;
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
name_flags & VDEV_NAME_GUID) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
path = tpath;
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
if (rp) {
strlcpy(buf, rp, sizeof (buf));
path = buf;
free(rp);
}
}
/*
* For a block device only use the name.
*/
if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
path = zfs_strip_path(path);
}
/*
* Remove the partition from the path if this is a whole disk.
*/
if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
} else {
path = type;
/*
* If it's a raidz device, we need to stick in the parity level.
*/
if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
(void) snprintf(buf, sizeof (buf), "%s%llu", path,
(u_longlong_t)value);
path = buf;
}
/*
* If it's a dRAID device, we add parity, groups, and spares.
*/
if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata, nparity, nspares;
nvlist_t **child;
uint_t children;
verify(nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
nparity = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY);
ndata = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NDATA);
nspares = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NSPARES);
path = zpool_draid_name(buf, sizeof (buf), ndata,
nparity, nspares, children);
}
/*
* We identify each top-level vdev by using a <type-id>
* naming convention.
*/
if (name_flags & VDEV_NAME_TYPE_ID) {
uint64_t id = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_ID);
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
path, (u_longlong_t)id);
path = tmpbuf;
}
}
return (zfs_strdup(hdl, path));
}
static int
zbookmark_mem_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
* Retrieve the persistent error log, uniquify the members, and return to the
* caller.
*/
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t count;
zbookmark_phys_t *zb = NULL;
int i;
/*
* Retrieve the raw error list from the kernel. If the number of errors
* has increased, allocate more space and continue until we get the
* entire list.
*/
count = fnvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT);
if (count == 0)
return (0);
zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
count * sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst_size = count;
(void) strcpy(zc.zc_name, zhp->zpool_name);
for (;;) {
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
&zc) != 0) {
free((void *)(uintptr_t)zc.zc_nvlist_dst);
if (errno == ENOMEM) {
void *dst;
count = zc.zc_nvlist_dst_size;
dst = zfs_alloc(zhp->zpool_hdl, count *
sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst = (uintptr_t)dst;
} else {
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "errors: List of "
"errors unavailable")));
}
} else {
break;
}
}
/*
* Sort the resulting bookmarks. This is a little confusing due to the
* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
* to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
* _not_ copied as part of the process. So we point the start of our
* array appropriately and decrement the total number of elements.
*/
zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
zc.zc_nvlist_dst_size;
count -= zc.zc_nvlist_dst_size;
qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
/*
* Fill in nverrlistp with nvlists of dataset and object numbers.
*/
for (i = 0; i < count; i++) {
nvlist_t *nv;
/* ignoring zb_blkid and zb_level for now */
if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
zb[i-1].zb_object == zb[i].zb_object)
continue;
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
goto nomem;
if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
zb[i].zb_objset) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
zb[i].zb_object) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
nvlist_free(nv);
goto nomem;
}
nvlist_free(nv);
}
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (0);
nomem:
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (no_memory(zhp->zpool_hdl));
}
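/*
 * Illustrative caller sketch (not part of the original source): walking the
 * error list returned by zpool_get_errlog().  The constants are the same
 * ones referenced above; error handling is elided.
 *
 *	nvlist_t *nverrlist = NULL;
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		nvpair_t *elem = NULL;
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv = fnvpair_value_nvlist(elem);
 *			uint64_t dsobj = fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_DATASET);
 *			uint64_t obj = fnvlist_lookup_uint64(nv,
 *			    ZPOOL_ERR_OBJECT);
 *			char path[MAXPATHLEN];
 *			zpool_obj_to_path(zhp, dsobj, obj, path,
 *			    sizeof (path));
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */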
/*
* Upgrade a ZFS pool to the latest on-disk version.
*/
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strcpy(zc.zc_name, zhp->zpool_name);
zc.zc_cookie = new_version;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
zhp->zpool_name));
return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
int i;
(void) strlcpy(string, zfs_basename(argv[0]), len);
for (i = 1; i < argc; i++) {
(void) strlcat(string, " ", len);
(void) strlcat(string, argv[i], len);
}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *args;
args = fnvlist_alloc();
fnvlist_add_string(args, "message", message);
zcmd_write_src_nvlist(hdl, &zc, args);
int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
nvlist_free(args);
zcmd_free_nvlists(&zc);
return (err);
}
/*
* Perform ioctl to get some command history of a pool.
*
* 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
* logical offset of the history buffer to start reading from.
*
* Upon return, 'off' is the next logical offset to read from and
* 'len' is the actual amount of bytes read into 'buf'.
*/
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)buf;
zc.zc_history_len = *len;
zc.zc_history_offset = *off;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
switch (errno) {
case EPERM:
return (zfs_error_fmt(hdl, EZFS_PERM,
dgettext(TEXT_DOMAIN,
"cannot show history for pool '%s'"),
zhp->zpool_name));
case ENOENT:
return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s'"), zhp->zpool_name));
case ENOTSUP:
return (zfs_error_fmt(hdl, EZFS_BADVERSION,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s', pool must be upgraded"), zhp->zpool_name));
default:
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name));
}
}
*len = zc.zc_history_len;
*off = zc.zc_history_offset;
return (0);
}
/*
* Retrieve the command history of a pool.
*/
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
boolean_t *eof)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char *buf;
int buflen = 128 * 1024;
nvlist_t **records = NULL;
uint_t numrecords = 0;
int err = 0, i;
uint64_t start = *off;
buf = zfs_alloc(hdl, buflen);
/* process about 1 MiB at a time */
while (*off - start < 1024 * 1024) {
uint64_t bytes_read = buflen;
uint64_t leftover;
if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
break;
/* if nothing else was read in, we're at EOF, just return */
if (!bytes_read) {
*eof = B_TRUE;
break;
}
if ((err = zpool_history_unpack(buf, bytes_read,
&leftover, &records, &numrecords)) != 0) {
zpool_standard_error_fmt(hdl, err,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name);
break;
}
*off -= leftover;
if (leftover == bytes_read) {
/*
* no progress made, because buffer is not big enough
* to hold this record; resize and retry.
*/
buflen *= 2;
free(buf);
buf = zfs_alloc(hdl, buflen);
}
}
free(buf);
if (!err) {
*nvhisp = fnvlist_alloc();
fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
(const nvlist_t **)records, numrecords);
}
for (i = 0; i < numrecords; i++)
nvlist_free(records[i]);
free(records);
return (err);
}
/*
* Retrieve the next event given the passed 'zevent_fd' file descriptor.
* If there is a new event available 'nvp' will contain a newly allocated
* nvlist and 'dropped' will be set to the number of missed events since
* the last call to this function. When 'nvp' is set to NULL it indicates
* no new events are available. In either case the function returns 0 and
* it is up to the caller to free 'nvp'. In the case of a fatal error the
* function will return a non-zero value. When the function is called in
* blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
* it will not return until a new event is available.
*/
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
int *dropped, unsigned flags, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
*nvp = NULL;
*dropped = 0;
zc.zc_cleanup_fd = zevent_fd;
if (flags & ZEVENT_NONBLOCK)
zc.zc_guid = ZEVENT_NONBLOCK;
zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);
retry:
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
switch (errno) {
case ESHUTDOWN:
error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "zfs shutdown"));
goto out;
case ENOENT:
/* Blocking error case should not occur */
if (!(flags & ZEVENT_NONBLOCK))
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
case ENOMEM:
zcmd_expand_dst_nvlist(hdl, &zc);
goto retry;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
}
}
error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
if (error != 0)
goto out;
*dropped = (int)zc.zc_cookie;
out:
zcmd_free_nvlists(&zc);
return (error);
}
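/*
 * Illustrative consumer sketch (not part of the original source): draining
 * the queued events without blocking, in the style of "zpool events".
 * It assumes "hdl" is a libzfs handle and that ZFS_DEV is available from
 * the zfs headers; error handling and event decoding are simplified.
 *
 *	int zevent_fd = open(ZFS_DEV, O_RDWR);
 *	for (;;) {
 *		nvlist_t *nvp = NULL;
 *		int dropped = 0;
 *		if (zpool_events_next(hdl, &nvp, &dropped,
 *		    ZEVENT_NONBLOCK, zevent_fd) != 0)
 *			break;
 *		if (nvp == NULL)
 *			break;
 *		nvlist_print(stdout, nvp);
 *		nvlist_free(nvp);
 *	}
 *	(void) close(zevent_fd);
 */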
/*
* Clear all events.
*/
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
zfs_cmd_t zc = {"\0"};
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
return (zpool_standard_error(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot clear events")));
if (count != NULL)
*count = (int)zc.zc_cookie; /* # of events cleared */
return (0);
}
/*
* Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
* the passed zevent_fd file handle. On success zero is returned,
* otherwise -1 is returned and hdl->libzfs_error is set to the errno.
*/
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
zc.zc_guid = eid;
zc.zc_cleanup_fd = zevent_fd;
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
switch (errno) {
case ENOENT:
error = zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
case ENOMEM:
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
}
}
return (error);
}
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len, boolean_t always_unmounted)
{
zfs_cmd_t zc = {"\0"};
boolean_t mounted = B_FALSE;
char *mntpnt = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
if (dsobj == 0) {
/* special case for the MOS */
(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
(longlong_t)obj);
return;
}
/* get the dataset's name */
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_obj = dsobj;
if (zfs_ioctl(zhp->zpool_hdl,
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
/* just write out a path of two object numbers */
(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
(longlong_t)dsobj, (longlong_t)obj);
return;
}
(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
/* find out if the dataset is mounted */
mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
&mntpnt);
/* get the corrupted object's path */
(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
zc.zc_obj = obj;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
&zc) == 0) {
if (mounted) {
(void) snprintf(pathname, len, "%s%s", mntpnt,
zc.zc_value);
} else {
(void) snprintf(pathname, len, "%s:%s",
dsname, zc.zc_value);
}
} else {
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
(longlong_t)obj);
}
free(mntpnt);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
/*
* Wait while the specified activity is in progress in the pool.
*/
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
boolean_t missing;
int error = zpool_wait_status(zhp, activity, &missing, NULL);
if (missing) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
return (ENOENT);
} else {
return (error);
}
}
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent pools are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zpool's wait cmd). In that
* scenario, a pool being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the pool doesn't
* exist.
*/
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait(zhp->zpool_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
}
return (error);
}
int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
int error = lzc_set_bootenv(zhp->zpool_name, envmap);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error setting bootenv in pool '%s'"), zhp->zpool_name);
}
return (error);
}
int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
nvlist_t *nvl;
int error;
nvl = NULL;
error = lzc_get_bootenv(zhp->zpool_name, &nvl);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error getting bootenv in pool '%s'"), zhp->zpool_name);
} else {
*nvlp = nvl;
}
return (error);
}
/*
* Attempt to read and parse feature file(s) (from "compatibility" property).
* Files contain zpool feature names, comma or whitespace-separated.
* Comments (# character to next newline) are discarded.
*
* Arguments:
* compatibility : string containing feature filenames
* features : either NULL or pointer to array of boolean
* report : either NULL or pointer to string buffer
* rlen : length of "report" buffer
*
* compatibility is NULL (unset), "", "off", "legacy", or list of
* comma-separated filenames. filenames should either be absolute,
* or relative to:
* 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
* 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
* (Unset), "" or "off" => enable all features
* "legacy" => disable all features
*
* Any feature names read from files which match unames in spa_feature_table
* will have the corresponding boolean set in the features array (if non-NULL).
* If more than one feature set is specified, only features present in *all* of
* them will be set.
*
* "report" if not NULL will be populated with a suitable status message.
*
* Return values:
* ZPOOL_COMPATIBILITY_OK : files read and parsed ok
* ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
* ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
* ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
* ZPOOL_COMPATIBILITY_NOFILES : no feature files found
*/
zpool_compat_status_t
zpool_load_compat(const char *compat, boolean_t *features, char *report,
size_t rlen)
{
int sdirfd, ddirfd, featfd;
struct stat fs;
char *fc;
char *ps, *ls, *ws;
char *file, *line, *word;
char l_compat[ZFS_MAXPROPLEN];
boolean_t ret_nofiles = B_TRUE;
boolean_t ret_badfile = B_FALSE;
boolean_t ret_badtoken = B_FALSE;
boolean_t ret_warntoken = B_FALSE;
/* special cases (unset), "" and "off" => enable all features */
if (compat == NULL || compat[0] == '\0' ||
strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
if (report != NULL)
strlcpy(report, gettext("all features enabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/* Final special case "legacy" => disable all features */
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_FALSE;
if (report != NULL)
strlcpy(report, gettext("all features disabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/*
* Start with all true; will be ANDed with results from each file
*/
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
- char err_badfile[1024] = "";
- char err_badtoken[1024] = "";
+ char err_badfile[ZFS_MAXPROPLEN] = "";
+ char err_badtoken[ZFS_MAXPROPLEN] = "";
/*
* We ignore errors from the directory open()
* as the directory fds are only needed if a filename is relative,
* which will be checked during the openat().
*/
/* O_PATH safer than O_RDONLY if system allows it */
#if defined(O_PATH)
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
#else
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
#endif
sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
for (file = strtok_r(l_compat, ",", &ps);
file != NULL;
file = strtok_r(NULL, ",", &ps)) {
boolean_t l_features[SPA_FEATURES];
enum { Z_SYSCONF, Z_DATA } source;
/* try sysconfdir first, then datadir */
source = Z_SYSCONF;
if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
source = Z_DATA;
}
/* File readable and correct size? */
if (featfd < 0 ||
fstat(featfd, &fs) < 0 ||
fs.st_size < 1 ||
fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
(void) close(featfd);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
/* Prefault the file if system allows */
#if defined(MAP_POPULATE)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
#elif defined(MAP_PREFAULT_READ)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
#else
#define ZC_MMAP_FLAGS (MAP_PRIVATE)
#endif
/* private mmap() so we can strtok safely */
fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
ZC_MMAP_FLAGS, featfd, 0);
(void) close(featfd);
/* map ok, and last character == newline? */
if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
(void) munmap((void *) fc, fs.st_size);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
ret_nofiles = B_FALSE;
for (uint_t i = 0; i < SPA_FEATURES; i++)
l_features[i] = B_FALSE;
/* replace final newline with NULL to ensure string ends */
fc[fs.st_size - 1] = '\0';
for (line = strtok_r(fc, "\n", &ls);
line != NULL;
line = strtok_r(NULL, "\n", &ls)) {
/* discard comments */
char *r = strchr(line, '#');
if (r != NULL)
*r = '\0';
for (word = strtok_r(line, ", \t", &ws);
word != NULL;
word = strtok_r(NULL, ", \t", &ws)) {
/* Find matching feature name */
uint_t f;
for (f = 0; f < SPA_FEATURES; f++) {
zfeature_info_t *fi =
&spa_feature_table[f];
if (strcmp(word, fi->fi_uname) == 0) {
l_features[f] = B_TRUE;
break;
}
}
if (f < SPA_FEATURES)
continue;
/* found an unrecognized word */
/* lightly sanitize it */
if (strlen(word) > 32)
word[32] = '\0';
for (char *c = word; *c != '\0'; c++)
if (!isprint(*c))
*c = '?';
strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
if (source == Z_SYSCONF)
ret_badtoken = B_TRUE;
else
ret_warntoken = B_TRUE;
}
}
(void) munmap((void *) fc, fs.st_size);
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] &= l_features[i];
}
(void) close(sdirfd);
(void) close(ddirfd);
/* Return the most serious error */
if (ret_badfile) {
if (report != NULL)
snprintf(report, rlen, gettext("could not read/"
"parse feature file(s): %s"), err_badfile);
return (ZPOOL_COMPATIBILITY_BADFILE);
}
if (ret_nofiles) {
if (report != NULL)
strlcpy(report,
gettext("no valid compatibility files specified"),
rlen);
return (ZPOOL_COMPATIBILITY_NOFILES);
}
if (ret_badtoken) {
if (report != NULL)
snprintf(report, rlen, gettext("invalid feature "
"name(s) in local compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_BADTOKEN);
}
if (ret_warntoken) {
if (report != NULL)
snprintf(report, rlen, gettext("unrecognized feature "
"name(s) in distribution compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_WARNTOKEN);
}
if (report != NULL)
strlcpy(report, gettext("compatibility set ok"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
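/*
 * Illustrative example (not part of the original source): a minimal
 * compatibility file as parsed above.  Feature names may be separated by
 * commas or whitespace, "#" starts a comment, the file must end with a
 * newline, and it may be at most ZPOOL_COMPAT_MAXSIZE bytes.
 *
 *	# /etc/zfs/compatibility.d/example  (hypothetical file name)
 *	async_destroy
 *	lz4_compress, hole_birth
 *
 * A caller could then evaluate it with something like:
 *
 *	boolean_t features[SPA_FEATURES];
 *	char report[ZFS_MAXPROPLEN];
 *	zpool_compat_status_t status = zpool_load_compat("example",
 *	    features, report, sizeof (report));
 */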
static int
zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
{
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
verify(zhp != NULL);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
}
if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
NULL)) == NULL) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "can not find %s in %s"),
vdevname, zhp->zpool_name);
return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
}
*vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
return (0);
}
/*
* Get a vdev property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
nvlist_t *nv;
const char *strval;
uint64_t intval;
zprop_source_t src = ZPROP_SRC_NONE;
if (prop == VDEV_PROP_USERPROP) {
/* user property, prop_name must contain the property name */
assert(prop_name != NULL);
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
/* user prop not found */
return (-1);
}
(void) strlcpy(buf, strval, len);
if (srctype)
*srctype = src;
return (0);
}
if (prop_name == NULL)
prop_name = (char *)vdev_prop_to_name(prop);
switch (vdev_prop_get_type(prop)) {
case PROP_TYPE_STRING:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
if ((strval = vdev_prop_default_string(prop)) == NULL)
strval = "-";
}
(void) strlcpy(buf, strval, len);
break;
case PROP_TYPE_NUMBER:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
}
switch (prop) {
case VDEV_PROP_ASIZE:
case VDEV_PROP_PSIZE:
case VDEV_PROP_SIZE:
case VDEV_PROP_BOOTSIZE:
case VDEV_PROP_ALLOCATED:
case VDEV_PROP_FREE:
case VDEV_PROP_READ_ERRORS:
case VDEV_PROP_WRITE_ERRORS:
case VDEV_PROP_CHECKSUM_ERRORS:
case VDEV_PROP_INITIALIZE_ERRORS:
case VDEV_PROP_OPS_NULL:
case VDEV_PROP_OPS_READ:
case VDEV_PROP_OPS_WRITE:
case VDEV_PROP_OPS_FREE:
case VDEV_PROP_OPS_CLAIM:
case VDEV_PROP_OPS_TRIM:
case VDEV_PROP_BYTES_NULL:
case VDEV_PROP_BYTES_READ:
case VDEV_PROP_BYTES_WRITE:
case VDEV_PROP_BYTES_FREE:
case VDEV_PROP_BYTES_CLAIM:
case VDEV_PROP_BYTES_TRIM:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicenum(intval, buf, len);
}
break;
case VDEV_PROP_EXPANDSZ:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicenum(intval, buf, len);
}
break;
case VDEV_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_STATE:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) strlcpy(buf, zpool_state_to_name(intval,
VDEV_AUX_NONE), len);
}
break;
default:
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
}
if (vdev_prop_index_to_string(prop, intval,
(const char **)&strval) != 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
/*
* Get a vdev property value for 'prop_name' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
boolean_t literal)
{
nvlist_t *reqnvl, *reqprops;
nvlist_t *retprops = NULL;
uint64_t vdev_guid = 0;
int ret;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
if (prop != VDEV_PROP_USERPROP) {
/* prop_name overrides prop value */
if (prop_name != NULL)
prop = vdev_name_to_prop(prop_name);
else
prop_name = (char *)vdev_prop_to_name(prop);
assert(prop < VDEV_NUM_PROPS);
}
assert(prop_name != NULL);
if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
nvlist_free(reqnvl);
nvlist_free(reqprops);
return (no_memory(zhp->zpool_hdl));
}
fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
if (ret == 0) {
ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
len, srctype, literal);
} else {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
" %s in %s"), prop_name, vdevname, zhp->zpool_name);
(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
}
nvlist_free(reqnvl);
nvlist_free(reqprops);
nvlist_free(retprops);
return (ret);
}
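/*
 * Illustrative usage sketch (not part of the original source): reading a
 * single vdev property, assuming "zhp" is an open pool handle and "da1"
 * is a hypothetical leaf vdev name in that pool.
 *
 *	char value[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *	if (zpool_get_vdev_prop(zhp, "da1", VDEV_PROP_CAPACITY, NULL,
 *	    value, sizeof (value), &src, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", value);
 */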
/*
* Get all vdev properties
*/
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
nvlist_t **outnvl)
{
nvlist_t *nvl = NULL;
uint64_t vdev_guid = 0;
int ret;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);
nvlist_free(nvl);
if (ret) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
" %s in %s"), vdevname, zhp->zpool_name);
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
}
return (ret);
}
/*
* Set vdev property
*/
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
const char *propname, const char *propval)
{
int ret;
nvlist_t *nvl = NULL;
nvlist_t *outnvl = NULL;
nvlist_t *props;
nvlist_t *realprops;
prop_flags_t flags = { 0 };
uint64_t version;
uint64_t vdev_guid;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
if (nvlist_add_string(props, propname, propval) != 0) {
nvlist_free(props);
return (no_memory(zhp->zpool_hdl));
}
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
propname, vdevname, zhp->zpool_name);
flags.vdevprop = 1;
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
nvlist_free(props);
nvlist_free(nvl);
return (-1);
}
nvlist_free(props);
props = realprops;
fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);
nvlist_free(props);
nvlist_free(nvl);
nvlist_free(outnvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
return (ret);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index 640051e3b029..577ebf6aad42 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -1,5436 +1,5426 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019 Datto Inc.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/avl.h>
#include <sys/debug.h>
#include <sys/stat.h>
#include <pthread.h>
#include <umem.h>
#include <time.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include <cityhash.h>
#include <zlib.h>
#include <sys/zio_checksum.h>
#include <sys/dsl_crypt.h>
#include <sys/ddt.h>
#include <sys/socket.h>
#include <sys/sha2.h>
static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **,
const char *, nvlist_t *);
static int guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name);
static int guid_to_name(libzfs_handle_t *, const char *,
uint64_t, boolean_t, char *);
typedef struct progress_arg {
zfs_handle_t *pa_zhp;
int pa_fd;
boolean_t pa_parsable;
boolean_t pa_estimate;
int pa_verbosity;
} progress_arg_t;
static int
dump_record(dmu_replay_record_t *drr, void *payload, size_t payload_len,
zio_cksum_t *zc, int outfd)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
fletcher_4_incremental_native(drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
if (drr->drr_type != DRR_BEGIN) {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
drr_checksum.drr_checksum));
drr->drr_u.drr_checksum.drr_checksum = *zc;
}
fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), zc);
if (write(outfd, drr, sizeof (*drr)) == -1)
return (errno);
if (payload_len != 0) {
fletcher_4_incremental_native(payload, payload_len, zc);
if (write(outfd, payload, payload_len) == -1)
return (errno);
}
return (0);
}
/*
* Routines for dealing with the AVL tree of fs-nvlists
*/
typedef struct fsavl_node {
avl_node_t fn_node;
nvlist_t *fn_nvfs;
char *fn_snapname;
uint64_t fn_guid;
} fsavl_node_t;
static int
fsavl_compare(const void *arg1, const void *arg2)
{
const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1;
const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2;
return (TREE_CMP(fn1->fn_guid, fn2->fn_guid));
}
/*
* Given the GUID of a snapshot, find its containing filesystem and
* (optionally) name.
*/
static nvlist_t *
fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname)
{
fsavl_node_t fn_find;
fsavl_node_t *fn;
fn_find.fn_guid = snapguid;
fn = avl_find(avl, &fn_find, NULL);
if (fn) {
if (snapname)
*snapname = fn->fn_snapname;
return (fn->fn_nvfs);
}
return (NULL);
}
static void
fsavl_destroy(avl_tree_t *avl)
{
fsavl_node_t *fn;
void *cookie;
if (avl == NULL)
return;
cookie = NULL;
while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
free(fn);
avl_destroy(avl);
free(avl);
}
/*
* Given an nvlist, produce an avl tree of snapshots, ordered by guid
*/
static avl_tree_t *
fsavl_create(nvlist_t *fss)
{
avl_tree_t *fsavl;
nvpair_t *fselem = NULL;
if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
return (NULL);
avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
offsetof(fsavl_node_t, fn_node));
while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
nvlist_t *nvfs, *snaps;
nvpair_t *snapelem = NULL;
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
while ((snapelem =
nvlist_next_nvpair(snaps, snapelem)) != NULL) {
fsavl_node_t *fn;
if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
fsavl_destroy(fsavl);
return (NULL);
}
fn->fn_nvfs = nvfs;
fn->fn_snapname = nvpair_name(snapelem);
fn->fn_guid = fnvpair_value_uint64(snapelem);
/*
* Note: if there are multiple snaps with the
* same GUID, we ignore all but one.
*/
avl_index_t where = 0;
if (avl_find(fsavl, fn, &where) == NULL)
avl_insert(fsavl, fn, where);
else
free(fn);
}
}
return (fsavl);
}
/*
* Routines for dealing with the giant nvlist of fs-nvlists, etc.
*/
typedef struct send_data {
/*
* assigned inside every recursive call,
* restored from *_save on return:
*
* guid of fromsnap snapshot in parent dataset
* txg of fromsnap snapshot in current dataset
* txg of tosnap snapshot in current dataset
*/
uint64_t parent_fromsnap_guid;
uint64_t fromsnap_txg;
uint64_t tosnap_txg;
/* the nvlists get accumulated during depth-first traversal */
nvlist_t *parent_snaps;
nvlist_t *fss;
nvlist_t *snapprops;
nvlist_t *snapholds; /* user holds */
/* send-receive configuration, does not change during traversal */
const char *fsname;
const char *fromsnap;
const char *tosnap;
boolean_t recursive;
boolean_t raw;
boolean_t doall;
boolean_t replicate;
boolean_t skipmissing;
boolean_t verbose;
boolean_t backup;
boolean_t seenfrom;
boolean_t seento;
boolean_t holds; /* were holds requested with send -h */
boolean_t props;
/*
* The header nvlist is of the following format:
* {
* "tosnap" -> string
* "fromsnap" -> string (if incremental)
* "fss" -> {
* id -> {
*
* "name" -> string (full name; for debugging)
* "parentfromsnap" -> number (guid of fromsnap in parent)
*
* "props" -> { name -> value (only if set here) }
* "snaps" -> { name (lastname) -> number (guid) }
* "snapprops" -> { name (lastname) -> { name -> value } }
* "snapholds" -> { name (lastname) -> { holdname -> crtime } }
*
* "origin" -> number (guid) (if clone)
* "is_encroot" -> boolean
* "sent" -> boolean (not on-disk)
* }
* }
* }
*
*/
} send_data_t;
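/*
 * Illustrative instance of the header nvlist described above (not part of
 * the original source), for a hypothetical incremental send of pool/fs
 * from @snap1 to @snap2; the guids and the fs id are made up:
 *
 *	"tosnap" -> "snap2"
 *	"fromsnap" -> "snap1"
 *	"fss" -> {
 *		"0x1234" -> {
 *			"name" -> "pool/fs"
 *			"parentfromsnap" -> 0
 *			"props" -> { }
 *			"snaps" -> { "snap1" -> 0x1111, "snap2" -> 0x2222 }
 *			"snapprops" -> { "snap1" -> { }, "snap2" -> { } }
 *		}
 *	}
 */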
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);
/*
* Collect guid, valid props, optionally holds, etc. of a snapshot.
* This interface is intended for use as a zfs_iter_snapshots_sorted visitor.
*/
static int
send_iterate_snap(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
boolean_t isfromsnap, istosnap, istosnapwithnofrom;
char *snapname;
const char *from = sd->fromsnap;
const char *to = sd->tosnap;
snapname = strrchr(zhp->zfs_name, '@');
assert(snapname != NULL);
++snapname;
isfromsnap = (from != NULL && strcmp(from, snapname) == 0);
istosnap = (to != NULL && strcmp(to, snapname) == 0);
istosnapwithnofrom = (istosnap && from == NULL);
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping snapshot %s because it was created "
"after the destination snapshot (%s)\n"),
zhp->zfs_name, to);
}
zfs_close(zhp);
return (0);
}
fnvlist_add_uint64(sd->parent_snaps, snapname, guid);
/*
* NB: if there is no fromsnap here (it's a newly created fs in
* an incremental replication), we will substitute the tosnap.
*/
if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap))
sd->parent_fromsnap_guid = guid;
if (!sd->recursive) {
/*
* To allow a doall stream to work properly
* with a NULL fromsnap
*/
if (sd->doall && from == NULL && !sd->seenfrom)
sd->seenfrom = B_TRUE;
if (!sd->seenfrom && isfromsnap) {
sd->seenfrom = B_TRUE;
zfs_close(zhp);
return (0);
}
if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) {
zfs_close(zhp);
return (0);
}
if (istosnap)
sd->seento = B_TRUE;
}
nvlist_t *nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(sd->snapprops, snapname, nv);
fnvlist_free(nv);
if (sd->holds) {
nvlist_t *holds;
if (lzc_get_holds(zhp->zfs_name, &holds) == 0) {
fnvlist_add_nvlist(sd->snapholds, snapname, holds);
fnvlist_free(holds);
}
}
zfs_close(zhp);
return (0);
}
/*
* Collect all valid props from the handle snap into an nvlist.
*/
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv)
{
nvlist_t *props;
if (received_only)
props = zfs_get_recvd_props(zhp);
else
props = zhp->zfs_props;
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
char *propname = nvpair_name(elem);
zfs_prop_t prop = zfs_name_to_prop(propname);
if (!zfs_prop_user(propname)) {
/*
* Realistically, this should never happen. However,
* we want the ability to add DSL properties without
* needing to make incompatible version changes. We
* need to ignore unknown properties to allow older
* software to still send datasets containing these
* properties, with the unknown properties elided.
*/
if (prop == ZPROP_INVAL)
continue;
if (zfs_prop_readonly(prop))
continue;
}
nvlist_t *propnv = fnvpair_value_nvlist(elem);
boolean_t isspacelimit = (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION);
if (isspacelimit && zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
continue;
char *source;
if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) == 0) {
if (strcmp(source, zhp->zfs_name) != 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
} else {
/*
* May have no source before SPA_VERSION_RECVD_PROPS,
* but is still modifiable.
*/
if (!isspacelimit)
continue;
}
if (zfs_prop_user(propname) ||
zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
char *value;
value = fnvlist_lookup_string(propnv, ZPROP_VALUE);
fnvlist_add_string(nv, propname, value);
} else {
uint64_t value;
value = fnvlist_lookup_uint64(propnv, ZPROP_VALUE);
fnvlist_add_uint64(nv, propname, value);
}
}
}
/*
* Returns the snapshot guid,
* or 0 if the snapshot does not exist.
*/
static uint64_t
get_snap_guid(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[MAXPATHLEN + 1];
uint64_t guid = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (guid);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
guid = zfs_prop_get_int(zhp, ZFS_PROP_GUID);
zfs_close(zhp);
}
return (guid);
}
/*
* Returns the snapshot creation txg,
* or 0 if the snapshot does not exist.
*/
static uint64_t
get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t txg = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (txg);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) {
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG);
zfs_close(zhp);
}
}
return (txg);
}
/*
* Recursively generate nvlists describing datasets. See comment
* for the data structure send_data_t above for description of contents
* of the nvlist.
*/
static int
send_iterate_fs(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
nvlist_t *nvfs = NULL, *nv = NULL;
int rv = 0;
uint64_t min_txg = 0, max_txg = 0;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t fromsnap_txg, tosnap_txg;
char guidstring[64];
/* These fields are restored on return from a recursive call. */
uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
uint64_t fromsnap_txg_save = sd->fromsnap_txg;
uint64_t tosnap_txg_save = sd->tosnap_txg;
fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap);
if (fromsnap_txg != 0)
sd->fromsnap_txg = fromsnap_txg;
tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap);
if (tosnap_txg != 0)
sd->tosnap_txg = tosnap_txg;
/*
* On the send side, if the current dataset does not have tosnap,
* perform two additional checks:
*
* - Skip sending the current dataset if it was created later than
* the parent tosnap.
* - Return error if the current dataset was created earlier than
* the parent tosnap, unless --skip-missing is specified; then
* just print a warning.
*/
if (sd->tosnap != NULL && tosnap_txg == 0) {
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping dataset %s: snapshot %s does "
"not exist\n"), zhp->zfs_name, sd->tosnap);
}
} else if (sd->skipmissing) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: skipping dataset %s and its children:"
" snapshot %s does not exist\n"),
zhp->zfs_name, sd->tosnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s%s: snapshot %s@%s does not "
"exist\n"), sd->fsname, sd->tosnap, sd->recursive ?
dgettext(TEXT_DOMAIN, " recursively") : "",
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOENT;
}
goto out;
}
nvfs = fnvlist_alloc();
fnvlist_add_string(nvfs, "name", zhp->zfs_name);
fnvlist_add_uint64(nvfs, "parentfromsnap", sd->parent_fromsnap_guid);
if (zhp->zfs_dmustats.dds_origin[0] != '\0') {
zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
if (origin == NULL) {
rv = -1;
goto out;
}
fnvlist_add_uint64(nvfs, "origin",
origin->zfs_dmustats.dds_guid);
zfs_close(origin);
}
/* Iterate over props. */
if (sd->props || sd->backup || sd->recursive) {
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(nvfs, "props", nv);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
boolean_t encroot;
/* Determine if this dataset is an encryption root. */
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) {
rv = -1;
goto out;
}
if (encroot)
fnvlist_add_boolean(nvfs, "is_encroot");
/*
* Encrypted datasets can only be sent with properties if
* the raw flag is specified because the receive side doesn't
* currently have a mechanism for recursively asking the user
* for new encryption parameters.
*/
if (!sd->raw) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s: encrypted dataset %s may not "
"be sent with properties without the raw flag\n"),
sd->fsname, sd->tosnap, zhp->zfs_name);
rv = -1;
goto out;
}
}
/*
* Iterate over snaps, and set sd->parent_fromsnap_guid.
*
* If this is a "doall" send, a replicate send or we're just trying
* to gather a list of previous snapshots, iterate through all the
* snaps in the txg range. Otherwise just look at the one we're
* interested in.
*/
sd->parent_fromsnap_guid = 0;
sd->parent_snaps = fnvlist_alloc();
sd->snapprops = fnvlist_alloc();
if (sd->holds)
sd->snapholds = fnvlist_alloc();
if (sd->doall || sd->replicate || sd->tosnap == NULL) {
if (!sd->replicate && fromsnap_txg != 0)
min_txg = fromsnap_txg;
if (!sd->replicate && tosnap_txg != 0)
max_txg = tosnap_txg;
(void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN];
zfs_handle_t *snap;
(void) snprintf(snapname, sizeof (snapname), "%s@%s",
zhp->zfs_name, sd->tosnap);
if (sd->fromsnap != NULL)
sd->seenfrom = B_TRUE;
snap = zfs_open(zhp->zfs_hdl, snapname, ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
(void) send_iterate_snap(snap, sd);
}
fnvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps);
fnvlist_free(sd->parent_snaps);
fnvlist_add_nvlist(nvfs, "snapprops", sd->snapprops);
fnvlist_free(sd->snapprops);
if (sd->holds) {
fnvlist_add_nvlist(nvfs, "snapholds", sd->snapholds);
fnvlist_free(sd->snapholds);
}
/* Do not allow the size of the properties list to exceed the limit */
if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"warning: cannot send %s@%s: the size of the list of "
"snapshots and properties is too large to be received "
"successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOSPC;
goto out;
}
/* Add this fs to nvlist. */
(void) snprintf(guidstring, sizeof (guidstring),
"0x%llx", (longlong_t)guid);
fnvlist_add_nvlist(sd->fss, guidstring, nvfs);
/* Iterate over children. */
if (sd->recursive)
rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd);
out:
/* Restore saved fields. */
sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
sd->fromsnap_txg = fromsnap_txg_save;
sd->tosnap_txg = tosnap_txg_save;
fnvlist_free(nv);
fnvlist_free(nvfs);
zfs_close(zhp);
return (rv);
}
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t doall,
boolean_t replicate, boolean_t skipmissing, boolean_t verbose,
boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
avl_tree_t **avlp)
{
zfs_handle_t *zhp;
send_data_t sd = { 0 };
int error;
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (EZFS_BADTYPE);
sd.fss = fnvlist_alloc();
sd.fsname = fsname;
sd.fromsnap = fromsnap;
sd.tosnap = tosnap;
sd.recursive = recursive;
sd.raw = raw;
sd.doall = doall;
sd.replicate = replicate;
sd.skipmissing = skipmissing;
sd.verbose = verbose;
sd.backup = backup;
sd.holds = holds;
sd.props = props;
if ((error = send_iterate_fs(zhp, &sd)) != 0) {
fnvlist_free(sd.fss);
if (avlp != NULL)
*avlp = NULL;
*nvlp = NULL;
return (error);
}
if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
fnvlist_free(sd.fss);
*nvlp = NULL;
return (EZFS_NOMEM);
}
*nvlp = sd.fss;
return (0);
}
/*
* Routines specific to "zfs send"
*/
typedef struct send_dump_data {
/* these are all just the short snapname (the part after the @) */
const char *fromsnap;
const char *tosnap;
char prevsnap[ZFS_MAX_DATASET_NAME_LEN];
uint64_t prevsnap_obj;
boolean_t seenfrom, seento, replicate, doall, fromorigin;
boolean_t dryrun, parsable, progress, embed_data, std_out;
boolean_t large_block, compress, raw, holds;
int outfd;
boolean_t err;
nvlist_t *fss;
nvlist_t *snapholds;
avl_tree_t *fsavl;
snapfilter_cb_t *filter_cb;
void *filter_cb_arg;
nvlist_t *debugnv;
char holdtag[ZFS_MAX_DATASET_NAME_LEN];
int cleanup_fd;
int verbosity;
uint64_t size;
} send_dump_data_t;
static int
zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from,
enum lzc_send_flags flags, uint64_t *spacep)
{
assert(snapname != NULL);
int error = lzc_send_space(snapname, from, flags, spacep);
if (error == 0)
return (0);
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot estimate space for '%s'"), snapname);
libzfs_handle_t *hdl = zhp->zfs_hdl;
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, snapname,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
snapname);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(error));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, error, errbuf));
}
}
/*
* Dumps a backup of the given snapshot (incremental from fromsnap if it's not
* NULL) to the file descriptor specified by outfd.
*/
static int
dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
boolean_t fromorigin, int outfd, enum lzc_send_flags flags,
nvlist_t *debugnv)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *thisdbg;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
assert(fromsnap_obj == 0 || !fromorigin);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = outfd;
zc.zc_obj = fromorigin;
zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zc.zc_fromobj = fromsnap_obj;
zc.zc_flags = flags;
if (debugnv != NULL) {
thisdbg = fnvlist_alloc();
if (fromsnap != NULL && fromsnap[0] != '\0')
fnvlist_add_string(thisdbg, "fromsnap", fromsnap);
}
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
char errbuf[ERRBUFLEN];
int error = errno;
(void) snprintf(errbuf, sizeof (errbuf), "%s '%s'",
dgettext(TEXT_DOMAIN, "warning: cannot send"),
zhp->zfs_name);
if (debugnv != NULL) {
fnvlist_add_uint64(thisdbg, "error", error);
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
}
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, zc.zc_name,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (@%s) does not exist"),
zc.zc_value);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
- case ENOTSUP:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "large blocks detected but large_blocks feature "
- "is inactive; raw send unsupported"));
- return (zfs_error(hdl, EZFS_NOTSUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
if (debugnv != NULL) {
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
}
return (0);
}
static void
gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
/*
* zfs_send() only sets snapholds for sends that need them,
* e.g. replication and doall.
*/
if (sdd->snapholds == NULL)
return;
fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag);
}
int
zfs_send_progress(zfs_handle_t *zhp, int fd, uint64_t *bytes_written,
uint64_t *blocks_visited)
{
zfs_cmd_t zc = {"\0"};
if (bytes_written != NULL)
*bytes_written = 0;
if (blocks_visited != NULL)
*blocks_visited = 0;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = fd;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
return (errno);
if (bytes_written != NULL)
*bytes_written = zc.zc_cookie;
if (blocks_visited != NULL)
*blocks_visited = zc.zc_objset_type;
return (0);
}
static void *
send_progress_thread(void *arg)
{
progress_arg_t *pa = arg;
zfs_handle_t *zhp = pa->pa_zhp;
uint64_t bytes;
uint64_t blocks;
char buf[16];
time_t t;
struct tm tm;
int err;
if (!pa->pa_parsable) {
(void) fprintf(stderr,
"TIME %s %sSNAPSHOT %s\n",
pa->pa_estimate ? "BYTES" : " SENT",
pa->pa_verbosity >= 2 ? " BLOCKS " : "",
zhp->zfs_name);
}
/*
* Print the progress from ZFS_IOC_SEND_PROGRESS every second.
*/
for (;;) {
(void) sleep(1);
if ((err = zfs_send_progress(zhp, pa->pa_fd, &bytes,
&blocks)) != 0) {
if (err == EINTR || err == ENOENT)
return ((void *)0);
return ((void *)(uintptr_t)err);
}
(void) time(&t);
localtime_r(&t, &tm);
if (pa->pa_verbosity >= 2 && pa->pa_parsable) {
(void) fprintf(stderr,
"%02d:%02d:%02d\t%llu\t%llu\t%s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
(u_longlong_t)bytes, (u_longlong_t)blocks,
zhp->zfs_name);
} else if (pa->pa_verbosity >= 2) {
zfs_nicenum(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"%02d:%02d:%02d %5s %8llu %s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
buf, (u_longlong_t)blocks, zhp->zfs_name);
} else if (pa->pa_parsable) {
(void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
(u_longlong_t)bytes, zhp->zfs_name);
} else {
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
buf, zhp->zfs_name);
}
}
}
static boolean_t
send_progress_thread_exit(libzfs_handle_t *hdl, pthread_t ptid)
{
void *status = NULL;
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED)
return (zfs_standard_error(hdl, error,
dgettext(TEXT_DOMAIN, "progress thread exited nonzero")));
else
return (B_FALSE);
}
static void
send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
uint64_t size, boolean_t parsable)
{
if (parsable) {
if (fromsnap != NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"incremental\t%s\t%s"), fromsnap, tosnap);
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full\t%s"), tosnap);
}
(void) fprintf(fout, "\t%llu", (longlong_t)size);
} else {
if (fromsnap != NULL) {
if (strchr(fromsnap, '@') == NULL &&
strchr(fromsnap, '#') == NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from @%s to %s"), fromsnap, tosnap);
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from %s to %s"), fromsnap, tosnap);
}
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full send of %s"), tosnap);
}
if (size != 0) {
char buf[16];
zfs_nicebytes(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
" estimated size is %s"), buf);
}
}
(void) fprintf(fout, "\n");
}
/*
* Send a single filesystem snapshot, updating the send dump data.
* This interface is intended for use as a zfs_iter_snapshots_sorted visitor.
*/
static int
dump_snapshot(zfs_handle_t *zhp, void *arg)
{
send_dump_data_t *sdd = arg;
progress_arg_t pa = { 0 };
pthread_t tid;
char *thissnap;
enum lzc_send_flags flags = 0;
int err;
boolean_t isfromsnap, istosnap, fromorigin;
boolean_t exclude = B_FALSE;
FILE *fout = sdd->std_out ? stdout : stderr;
err = 0;
thissnap = strchr(zhp->zfs_name, '@') + 1;
isfromsnap = (sdd->fromsnap != NULL &&
strcmp(sdd->fromsnap, thissnap) == 0);
if (!sdd->seenfrom && isfromsnap) {
gather_holds(zhp, sdd);
sdd->seenfrom = B_TRUE;
(void) strlcpy(sdd->prevsnap, thissnap, sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (0);
}
if (sdd->seento || !sdd->seenfrom) {
zfs_close(zhp);
return (0);
}
istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
if (istosnap)
sdd->seento = B_TRUE;
if (sdd->large_block)
flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (sdd->embed_data)
flags |= LZC_SEND_FLAG_EMBED_DATA;
if (sdd->compress)
flags |= LZC_SEND_FLAG_COMPRESS;
if (sdd->raw)
flags |= LZC_SEND_FLAG_RAW;
if (!sdd->doall && !isfromsnap && !istosnap) {
if (sdd->replicate) {
char *snapname;
nvlist_t *snapprops;
/*
* Filter out all intermediate snapshots except origin
* snapshots needed to replicate clones.
*/
nvlist_t *nvfs = fsavl_find(sdd->fsavl,
zhp->zfs_dmustats.dds_guid, &snapname);
if (nvfs != NULL) {
snapprops = fnvlist_lookup_nvlist(nvfs,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
thissnap);
exclude = !nvlist_exists(snapprops,
"is_clone_origin");
}
} else {
exclude = B_TRUE;
}
}
/*
* If a filter function exists, call it to determine whether
* this snapshot will be sent.
*/
if (exclude || (sdd->filter_cb != NULL &&
sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
/*
* This snapshot is filtered out. Don't send it, and don't
* set prevsnap_obj, so it will be as if this snapshot didn't
* exist: the next accepted snapshot will be sent as an
* incremental from the last accepted one, or as the first
* (and full) snapshot in the case of a non-incremental
* replication send.
*/
zfs_close(zhp);
return (0);
}
gather_holds(zhp, sdd);
fromorigin = sdd->prevsnap[0] == '\0' &&
(sdd->fromorigin || sdd->replicate);
if (sdd->verbosity != 0) {
uint64_t size = 0;
char fromds[ZFS_MAX_DATASET_NAME_LEN];
if (sdd->prevsnap[0] != '\0') {
(void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds));
*(strchr(fromds, '@') + 1) = '\0';
(void) strlcat(fromds, sdd->prevsnap, sizeof (fromds));
}
if (zfs_send_space(zhp, zhp->zfs_name,
sdd->prevsnap[0] ? fromds : NULL, flags, &size) == 0) {
send_print_verbose(fout, zhp->zfs_name,
sdd->prevsnap[0] ? sdd->prevsnap : NULL,
size, sdd->parsable);
sdd->size += size;
}
}
if (!sdd->dryrun) {
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (sdd->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = sdd->outfd;
pa.pa_parsable = sdd->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = sdd->verbosity;
if ((err = pthread_create(&tid, NULL,
send_progress_thread, &pa)) != 0) {
zfs_close(zhp);
return (err);
}
}
err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
fromorigin, sdd->outfd, flags, sdd->debugnv);
if (sdd->progress &&
send_progress_thread_exit(zhp->zfs_hdl, tid))
return (-1);
}
(void) strcpy(sdd->prevsnap, thissnap);
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (err);
}
/*
* Send all snapshots for a filesystem, updating the send dump data.
*/
static int
dump_filesystem(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
int rv = 0;
boolean_t missingfrom = B_FALSE;
zfs_cmd_t zc = {"\0"};
uint64_t min_txg = 0, max_txg = 0;
/*
* Make sure the tosnap exists.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->tosnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
sdd->err = B_TRUE;
return (0);
}
/*
* If this fs does not have fromsnap, and we're doing a
* recursive (replication) send, we need to send a full stream
* from the beginning (or an incremental from the origin if
* this is a clone). If we're doing a non-recursive send, let
* the caller see the error.
*/
if (sdd->replicate && sdd->fromsnap) {
/*
* Make sure the fromsnap exists.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->fromsnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0)
missingfrom = B_TRUE;
}
sdd->seenfrom = sdd->seento = B_FALSE;
sdd->prevsnap[0] = '\0';
sdd->prevsnap_obj = 0;
if (sdd->fromsnap == NULL || missingfrom)
sdd->seenfrom = B_TRUE;
/*
* Iterate through all snapshots and process the ones we will be
* sending. If we only have a "from" and "to" snapshot to deal
* with, we can avoid iterating through all the other snapshots.
*/
if (sdd->doall || sdd->replicate || sdd->tosnap == NULL) {
if (!sdd->replicate) {
if (sdd->fromsnap != NULL) {
min_txg = get_snap_txg(zhp->zfs_hdl,
zhp->zfs_name, sdd->fromsnap);
}
if (sdd->tosnap != NULL) {
max_txg = get_snap_txg(zhp->zfs_hdl,
zhp->zfs_name, sdd->tosnap);
}
}
rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, sdd,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
/* Dump fromsnap. */
if (!sdd->seenfrom) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->fromsnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = -1;
}
/* Dump tosnap. */
if (rv == 0) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->tosnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = -1;
}
}
if (!sdd->seenfrom) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) does not exist\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
sdd->err = B_TRUE;
} else if (!sdd->seento) {
if (sdd->fromsnap) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) "
"is not earlier than it\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: "
"could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
}
sdd->err = B_TRUE;
}
return (rv);
}
/*
* Send all snapshots for all filesystems in sdd.
*/
static int
dump_filesystems(zfs_handle_t *rzhp, send_dump_data_t *sdd)
{
nvpair_t *fspair;
boolean_t needagain, progress;
if (!sdd->replicate)
return (dump_filesystem(rzhp, sdd));
/* Mark the clone origin snapshots. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *nvfs;
uint64_t origin_guid = 0;
nvfs = fnvpair_value_nvlist(fspair);
(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
if (origin_guid != 0) {
char *snapname;
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, &snapname);
if (origin_nv != NULL) {
nvlist_t *snapprops;
snapprops = fnvlist_lookup_nvlist(origin_nv,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
snapname);
fnvlist_add_boolean(snapprops,
"is_clone_origin");
}
}
}
again:
needagain = progress = B_FALSE;
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist, *parent_nv;
char *fsname;
zfs_handle_t *zhp;
int err;
uint64_t origin_guid = 0;
uint64_t parent_guid = 0;
fslist = fnvpair_value_nvlist(fspair);
if (nvlist_lookup_boolean(fslist, "sent") == 0)
continue;
fsname = fnvlist_lookup_string(fslist, "name");
(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
&parent_guid);
if (parent_guid != 0) {
parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
if (!nvlist_exists(parent_nv, "sent")) {
/* Parent has not been sent; skip this one. */
needagain = B_TRUE;
continue;
}
}
if (origin_guid != 0) {
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, NULL);
if (origin_nv != NULL &&
!nvlist_exists(origin_nv, "sent")) {
/*
* Origin has not been sent yet;
* skip this clone.
*/
needagain = B_TRUE;
continue;
}
}
zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
err = dump_filesystem(zhp, sdd);
fnvlist_add_boolean(fslist, "sent");
progress = B_TRUE;
zfs_close(zhp);
if (err)
return (err);
}
if (needagain) {
assert(progress);
goto again;
}
/* Clean out the sent flags in case we reuse this fss. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist;
fslist = fnvpair_value_nvlist(fspair);
(void) nvlist_remove_all(fslist, "sent");
}
return (0);
}
nvlist_t *
zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token)
{
unsigned int version;
int nread, i;
unsigned long long checksum, packed_len;
/*
* Decode token header, which is:
* <token version>-<checksum of payload>-<uncompressed payload length>
* Note that the only supported token version is 1.
*/
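/*
* Illustrative sketch (hypothetical values, not taken from a real pool):
* a complete token string has the form
*
*   1-a1b2c3d4e5f60708-2a0-<hex-encoded, zlib-compressed packed nvlist>
*
* i.e. the three header fields parsed below, followed by a final
* '-'-separated payload field that is hex-decoded, checksummed and
* uncompressed by the remainder of this function.
*/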
nread = sscanf(token, "%u-%llx-%llx-",
&version, &checksum, &packed_len);
if (nread != 3) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid format)"));
return (NULL);
}
if (version != ZFS_SEND_RESUME_TOKEN_VERSION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid version %u)"),
version);
return (NULL);
}
/* Convert hexadecimal representation to binary. */
token = strrchr(token, '-') + 1;
int len = strlen(token) / 2;
unsigned char *compressed = zfs_alloc(hdl, len);
for (i = 0; i < len; i++) {
nread = sscanf(token + i * 2, "%2hhx", compressed + i);
if (nread != 1) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt "
"(payload is not hex-encoded)"));
return (NULL);
}
}
/* Verify checksum. */
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, len, &cksum);
if (cksum.zc_word[0] != checksum) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (incorrect checksum)"));
return (NULL);
}
/* Uncompress. */
void *packed = zfs_alloc(hdl, packed_len);
uLongf packed_len_long = packed_len;
if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK ||
packed_len_long != packed_len) {
free(packed);
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (decompression failed)"));
return (NULL);
}
/* Unpack nvlist. */
nvlist_t *nv;
int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP);
free(packed);
free(compressed);
if (error != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (nvlist_unpack failed)"));
return (NULL);
}
return (nv);
}
static enum lzc_send_flags
lzc_flags_from_sendflags(const sendflags_t *flags)
{
enum lzc_send_flags lzc_flags = 0;
if (flags->largeblock)
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data)
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress)
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw)
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved)
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
estimate_size(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
uint64_t resumeobj, uint64_t resumeoff, uint64_t bytes,
const char *redactbook, char *errbuf)
{
uint64_t size;
FILE *fout = flags->dryrun ? stdout : stderr;
progress_arg_t pa = { 0 };
int err = 0;
pthread_t ptid;
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_TRUE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
}
err = lzc_send_space_resume_redacted(zhp->zfs_name, from,
lzc_flags_from_sendflags(flags), resumeobj, resumeoff, bytes,
redactbook, fd, &size);
if (flags->progress && send_progress_thread_exit(zhp->zfs_hdl, ptid))
return (-1);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
send_print_verbose(fout, zhp->zfs_name, from, size,
flags->parsable);
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n", (longlong_t)size);
} else {
char buf[16];
zfs_nicenum(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
return (0);
}
static boolean_t
redact_snaps_contains(const uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
redact_snaps_equal(const uint64_t *snaps1, uint64_t num_snaps1,
const uint64_t *snaps2, uint64_t num_snaps2)
{
if (num_snaps1 != num_snaps2)
return (B_FALSE);
for (int i = 0; i < num_snaps1; i++) {
if (!redact_snaps_contains(snaps2, num_snaps2, snaps1[i]))
return (B_FALSE);
}
return (B_TRUE);
}
static int
get_bookmarks(const char *path, nvlist_t **bmarksp)
{
nvlist_t *props = fnvlist_alloc();
int error;
fnvlist_add_boolean(props, "redact_complete");
fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
error = lzc_get_bookmarks(path, props, bmarksp);
fnvlist_free(props);
return (error);
}
static nvpair_t *
find_redact_pair(nvlist_t *bmarks, const uint64_t *redact_snap_guids,
int num_redact_snaps)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(bmarks, NULL); pair;
pair = nvlist_next_nvpair(bmarks, pair)) {
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
uint_t len = 0;
uint64_t *bmarksnaps = fnvlist_lookup_uint64_array(vallist,
ZPROP_VALUE, &len);
if (redact_snaps_equal(redact_snap_guids,
num_redact_snaps, bmarksnaps, len)) {
break;
}
}
return (pair);
}
static boolean_t
get_redact_complete(nvpair_t *pair)
{
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark, "redact_complete");
boolean_t complete = fnvlist_lookup_boolean_value(vallist,
ZPROP_VALUE);
return (complete);
}
/*
* Check that the list of redaction snapshots in the bookmark matches the send
* we're resuming, and return whether or not it's complete.
*
* Note that if this function returns successfully, the caller is responsible
* for freeing *bookname with free().
*/
static int
find_redact_book(libzfs_handle_t *hdl, const char *path,
const uint64_t *redact_snap_guids, int num_redact_snaps,
char **bookname)
{
char errbuf[ERRBUFLEN];
nvlist_t *bmarks;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
int error = get_bookmarks(path, &bmarks);
if (error != 0) {
if (error == ESRCH) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"nonexistent redaction bookmark provided"));
} else if (error == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset to be sent no longer exists"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unknown error: %s"), strerror(error));
}
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
nvpair_t *pair = find_redact_pair(bmarks, redact_snap_guids,
num_redact_snaps);
if (pair == NULL) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no appropriate redaction bookmark exists"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
boolean_t complete = get_redact_complete(pair);
if (!complete) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incomplete redaction bookmark provided"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
*bookname = strndup(nvpair_name(pair), ZFS_MAX_DATASET_NAME_LEN);
ASSERT3P(*bookname, !=, NULL);
fnvlist_free(bmarks);
return (0);
}
static enum lzc_send_flags
lzc_flags_from_resume_nvl(nvlist_t *resume_nvl)
{
enum lzc_send_flags lzc_flags = 0;
if (nvlist_exists(resume_nvl, "largeblockok"))
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (nvlist_exists(resume_nvl, "embedok"))
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (nvlist_exists(resume_nvl, "compressok"))
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (nvlist_exists(resume_nvl, "rawok"))
lzc_flags |= LZC_SEND_FLAG_RAW;
if (nvlist_exists(resume_nvl, "savedok"))
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
zfs_send_resume_impl_cb_impl(libzfs_handle_t *hdl, sendflags_t *flags,
int outfd, nvlist_t *resume_nvl)
{
char errbuf[ERRBUFLEN];
char *toname;
char *fromname = NULL;
uint64_t resumeobj, resumeoff, toguid, fromguid, bytes;
zfs_handle_t *zhp;
int error = 0;
char name[ZFS_MAX_DATASET_NAME_LEN];
FILE *fout = (flags->verbosity > 0 && flags->dryrun) ? stdout : stderr;
uint64_t *redact_snap_guids = NULL;
int num_redact_snaps = 0;
char *redact_book = NULL;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
if (flags->verbosity != 0) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"resume token contents:\n"));
nvlist_print(fout, resume_nvl);
}
if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 ||
nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt"));
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
fromguid = 0;
(void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid);
if (flags->saved) {
(void) strcpy(name, toname);
} else {
error = guid_to_name(hdl, toname, toguid, B_FALSE, name);
if (error != 0) {
if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is no longer the same snapshot "
"used in the initial send"), toname);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' used in the initial send no "
"longer exists"), toname);
}
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
}
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unable to access '%s'"), name);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
if (nvlist_lookup_uint64_array(resume_nvl, "book_redact_snaps",
&redact_snap_guids, (uint_t *)&num_redact_snaps) != 0) {
num_redact_snaps = -1;
}
if (fromguid != 0) {
if (guid_to_name_redact_snaps(hdl, toname, fromguid, B_TRUE,
redact_snap_guids, num_redact_snaps, name) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source %#llx no longer exists"),
(longlong_t)fromguid);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
fromname = name;
}
redact_snap_guids = NULL;
if (nvlist_lookup_uint64_array(resume_nvl,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &redact_snap_guids,
(uint_t *)&num_redact_snaps) == 0) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, toname, sizeof (path));
char *at = strchr(path, '@');
ASSERT3P(at, !=, NULL);
*at = '\0';
if ((error = find_redact_book(hdl, path, redact_snap_guids,
num_redact_snaps, &redact_book)) != 0) {
return (error);
}
}
enum lzc_send_flags lzc_flags = lzc_flags_from_sendflags(flags) |
lzc_flags_from_resume_nvl(resume_nvl);
if (flags->verbosity != 0) {
/*
* Some of these may have come from the resume token; set them
* here for size-estimation purposes.
*/
sendflags_t tmpflags = *flags;
if (lzc_flags & LZC_SEND_FLAG_LARGE_BLOCK)
tmpflags.largeblock = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_COMPRESS)
tmpflags.compress = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
tmpflags.embed_data = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_RAW)
tmpflags.raw = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_SAVED)
tmpflags.saved = B_TRUE;
error = estimate_size(zhp, fromname, outfd, &tmpflags,
resumeobj, resumeoff, bytes, redact_book, errbuf);
}
if (!flags->dryrun) {
progress_arg_t pa = { 0 };
pthread_t tid;
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = outfd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
error = pthread_create(&tid, NULL,
send_progress_thread, &pa);
if (error != 0) {
if (redact_book != NULL)
free(redact_book);
zfs_close(zhp);
return (error);
}
}
error = lzc_send_resume_redacted(zhp->zfs_name, fromname, outfd,
lzc_flags, resumeobj, resumeoff, redact_book);
if (redact_book != NULL)
free(redact_book);
if (flags->progress && send_progress_thread_exit(hdl, tid))
return (-1);
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
zfs_close(zhp);
switch (error) {
case 0:
return (0);
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ESRCH:
if (lzc_exists(zhp->zfs_name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source could not be found"));
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EXDEV:
case ENOENT:
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
} else {
if (redact_book != NULL)
free(redact_book);
}
zfs_close(zhp);
return (error);
}
struct zfs_send_resume_impl {
libzfs_handle_t *hdl;
sendflags_t *flags;
nvlist_t *resume_nvl;
};
static int
zfs_send_resume_impl_cb(int outfd, void *arg)
{
struct zfs_send_resume_impl *zsri = arg;
return (zfs_send_resume_impl_cb_impl(zsri->hdl, zsri->flags, outfd,
zsri->resume_nvl));
}
static int
zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
nvlist_t *resume_nvl)
{
struct zfs_send_resume_impl zsri = {
.hdl = hdl,
.flags = flags,
.resume_nvl = resume_nvl,
};
return (lzc_send_wrapper(zfs_send_resume_impl_cb, outfd, &zsri));
}
int
zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
char errbuf[ERRBUFLEN];
nvlist_t *resume_nvl;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token);
if (resume_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
ret = zfs_send_resume_impl(hdl, flags, outfd, resume_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
int
zfs_send_saved(zfs_handle_t *zhp, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *saved_nvl = NULL, *resume_nvl = NULL;
uint64_t saved_guid = 0, resume_guid = 0;
uint64_t obj = 0, off = 0, bytes = 0;
char token_buf[ZFS_MAXPROPLEN];
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"saved send failed"));
ret = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE);
if (ret != 0)
goto out;
saved_nvl = zfs_send_resume_token_to_nvlist(hdl, token_buf);
if (saved_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
/*
* If a resume token is provided, we use the object and offset
* from that instead of the default, which starts from the
* beginning.
*/
if (resume_token != NULL) {
resume_nvl = zfs_send_resume_token_to_nvlist(hdl,
resume_token);
if (resume_nvl == NULL) {
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(resume_nvl, "object", &obj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &off) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid",
&resume_guid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(saved_nvl, "toguid",
&saved_guid)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset's resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (resume_guid != saved_guid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token does not match dataset"));
ret = zfs_error(hdl, EZFS_BADBACKUP, errbuf);
goto out;
}
}
(void) nvlist_remove_all(saved_nvl, "object");
fnvlist_add_uint64(saved_nvl, "object", obj);
(void) nvlist_remove_all(saved_nvl, "offset");
fnvlist_add_uint64(saved_nvl, "offset", off);
(void) nvlist_remove_all(saved_nvl, "bytes");
fnvlist_add_uint64(saved_nvl, "bytes", bytes);
(void) nvlist_remove_all(saved_nvl, "toname");
fnvlist_add_string(saved_nvl, "toname", zhp->zfs_name);
ret = zfs_send_resume_impl(hdl, flags, outfd, saved_nvl);
out:
fnvlist_free(saved_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
/*
* This function informs the target system that the recursive send is complete.
* The record is also expected in the case of a "zfs send -p".
*/
static int
send_conclusion_record(int fd, zio_cksum_t *zc)
{
dmu_replay_record_t drr = { 0 };
drr.drr_type = DRR_END;
if (zc != NULL)
drr.drr_u.drr_end.drr_checksum = *zc;
if (write(fd, &drr, sizeof (drr)) == -1) {
return (errno);
}
return (0);
}
/*
* This function is responsible for sending the records that contain the
* necessary information for the target system's libzfs to be able to set the
* properties of the filesystem being received, or to be able to prepare for
* a recursive receive.
*
* The "zhp" argument is the handle of the snapshot we are sending
* (the "tosnap"). The "from" argument is the short snapshot name (the part
* after the @) of the incremental source.
*/
static int
send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
boolean_t gather_props, boolean_t recursive, boolean_t verbose,
boolean_t dryrun, boolean_t raw, boolean_t replicate, boolean_t skipmissing,
boolean_t backup, boolean_t holds, boolean_t props, boolean_t doall,
nvlist_t **fssp, avl_tree_t **fsavlp)
{
int err = 0;
char *packbuf = NULL;
size_t buflen = 0;
zio_cksum_t zc = { {0} };
int featureflags = 0;
/* name of filesystem/volume that contains snapshot we are sending */
char tofs[ZFS_MAX_DATASET_NAME_LEN];
/* short name of snap we are sending */
const char *tosnap = "";
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && zfs_prop_get_int(zhp,
ZFS_PROP_VERSION) >= ZPL_VERSION_SA) {
featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
if (holds)
featureflags |= DMU_BACKUP_FEATURE_HOLDS;
(void) strlcpy(tofs, zhp->zfs_name, ZFS_MAX_DATASET_NAME_LEN);
char *at = strchr(tofs, '@');
if (at != NULL) {
*at = '\0';
tosnap = at + 1;
}
if (gather_props) {
nvlist_t *hdrnv = fnvlist_alloc();
nvlist_t *fss = NULL;
if (from != NULL)
fnvlist_add_string(hdrnv, "fromsnap", from);
fnvlist_add_string(hdrnv, "tosnap", tosnap);
if (!recursive)
fnvlist_add_boolean(hdrnv, "not_recursive");
if (raw) {
fnvlist_add_boolean(hdrnv, "raw");
}
if ((err = gather_nvlist(zhp->zfs_hdl, tofs,
from, tosnap, recursive, raw, doall, replicate, skipmissing,
verbose, backup, holds, props, &fss, fsavlp)) != 0) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
/*
* Do not allow the size of the properties list to exceed
* the limit
*/
if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
"the size of the list of snapshots and properties "
"is too large to be received successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name);
return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
errbuf));
}
fnvlist_add_nvlist(hdrnv, "fss", fss);
VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
0));
if (fssp != NULL) {
*fssp = fss;
} else {
fnvlist_free(fss);
}
fnvlist_free(hdrnv);
}
if (!dryrun) {
dmu_replay_record_t drr = { 0 };
/* write first begin record */
drr.drr_type = DRR_BEGIN;
drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
drr_versioninfo, DMU_COMPOUNDSTREAM);
DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
drr_versioninfo, featureflags);
if (snprintf(drr.drr_u.drr_begin.drr_toname,
sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", tofs,
tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
drr.drr_payloadlen = buflen;
err = dump_record(&drr, packbuf, buflen, &zc, fd);
free(packbuf);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
err = send_conclusion_record(fd, &zc);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
}
return (0);
}
/*
* Generate a send stream. The "zhp" argument is the filesystem/volume
* that contains the snapshot to send. The "fromsnap" argument is the
* short name (the part after the '@') of the snapshot that is the
* incremental source to send from (if non-NULL). The "tosnap" argument
* is the short name of the snapshot to send.
*
* The content of the send stream is the snapshot identified by
* 'tosnap'. Incremental streams are requested in two ways:
* - from the snapshot identified by "fromsnap" (if non-null) or
* - from the origin of the dataset identified by zhp, which must
* be a clone. In this case, "fromsnap" is null and "fromorigin"
* is TRUE.
*
* The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
* uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
* if "replicate" is set. If "doall" is set, dump all the intermediate
* snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
* case too. If "props" is set, send properties.
*
* Pre-wrapped (cf. lzc_send_wrapper()).
*/
static int
zfs_send_cb_impl(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
char errbuf[ERRBUFLEN];
send_dump_data_t sdd = { 0 };
int err = 0;
nvlist_t *fss = NULL;
avl_tree_t *fsavl = NULL;
static uint64_t holdseq;
int spa_version;
FILE *fout;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot send '%s'"), zhp->zfs_name);
if (fromsnap && fromsnap[0] == '\0') {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"zero-length incremental source"));
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
}
if (fromsnap) {
char full_fromsnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_fromsnap_name, sizeof (full_fromsnap_name),
"%s@%s", zhp->zfs_name, fromsnap) >=
sizeof (full_fromsnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *fromsnapn = zfs_open(zhp->zfs_hdl,
full_fromsnap_name, ZFS_TYPE_SNAPSHOT);
if (fromsnapn == NULL) {
err = -1;
goto err_out;
}
zfs_close(fromsnapn);
}
if (flags->replicate || flags->doall || flags->props ||
flags->holds || flags->backup) {
char full_tosnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_tosnap_name, sizeof (full_tosnap_name),
"%s@%s", zhp->zfs_name, tosnap) >=
sizeof (full_tosnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *tosnap = zfs_open(zhp->zfs_hdl,
full_tosnap_name, ZFS_TYPE_SNAPSHOT);
if (tosnap == NULL) {
err = -1;
goto err_out;
}
err = send_prelim_records(tosnap, fromsnap, outfd,
flags->replicate || flags->props || flags->holds,
flags->replicate, flags->verbosity > 0, flags->dryrun,
flags->raw, flags->replicate, flags->skipmissing,
flags->backup, flags->holds, flags->props, flags->doall,
&fss, &fsavl);
zfs_close(tosnap);
if (err != 0)
goto err_out;
}
/* dump each stream */
sdd.fromsnap = fromsnap;
sdd.tosnap = tosnap;
sdd.outfd = outfd;
sdd.replicate = flags->replicate;
sdd.doall = flags->doall;
sdd.fromorigin = flags->fromorigin;
sdd.fss = fss;
sdd.fsavl = fsavl;
sdd.verbosity = flags->verbosity;
sdd.parsable = flags->parsable;
sdd.progress = flags->progress;
sdd.dryrun = flags->dryrun;
sdd.large_block = flags->largeblock;
sdd.embed_data = flags->embed_data;
sdd.compress = flags->compress;
sdd.raw = flags->raw;
sdd.holds = flags->holds;
sdd.filter_cb = filter_func;
sdd.filter_cb_arg = cb_arg;
if (debugnvp)
sdd.debugnv = *debugnvp;
if (sdd.verbosity != 0 && sdd.dryrun)
sdd.std_out = B_TRUE;
fout = sdd.std_out ? stdout : stderr;
/*
* Some flags require that we place user holds on the datasets that are
* being sent so they don't get destroyed during the send. We can skip
* this step if the pool is imported read-only since the datasets cannot
* be destroyed.
*/
if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
ZPOOL_PROP_READONLY, NULL) &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS &&
(flags->doall || flags->replicate)) {
++holdseq;
(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
sdd.cleanup_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (sdd.cleanup_fd < 0) {
err = errno;
goto stderr_out;
}
sdd.snapholds = fnvlist_alloc();
} else {
sdd.cleanup_fd = -1;
sdd.snapholds = NULL;
}
if (flags->verbosity != 0 || sdd.snapholds != NULL) {
/*
* Do a verbose no-op dry run to get all the verbose output
* or to gather snapshot holds before generating any data,
* then do a non-verbose real run to generate the streams.
*/
sdd.dryrun = B_TRUE;
err = dump_filesystems(zhp, &sdd);
if (err != 0)
goto stderr_out;
if (flags->verbosity != 0) {
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n",
(longlong_t)sdd.size);
} else {
char buf[16];
zfs_nicebytes(sdd.size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
}
/* Ensure no snaps found is treated as an error. */
if (!sdd.seento) {
err = ENOENT;
goto err_out;
}
/* Skip the second run if dryrun was requested. */
if (flags->dryrun)
goto err_out;
if (sdd.snapholds != NULL) {
err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds);
if (err != 0)
goto stderr_out;
fnvlist_free(sdd.snapholds);
sdd.snapholds = NULL;
}
sdd.dryrun = B_FALSE;
sdd.verbosity = 0;
}
err = dump_filesystems(zhp, &sdd);
fsavl_destroy(fsavl);
fnvlist_free(fss);
/* Ensure no snaps found is treated as an error. */
if (err == 0 && !sdd.seento)
err = ENOENT;
if (sdd.cleanup_fd != -1) {
VERIFY(0 == close(sdd.cleanup_fd));
sdd.cleanup_fd = -1;
}
if (!flags->dryrun && (flags->replicate || flags->doall ||
flags->props || flags->backup || flags->holds)) {
/*
* Write the final end record. NB: we want to do this even if
* there was some error, because the stream might not have
* failed completely.
*/
int err2 = send_conclusion_record(outfd, NULL);
if (err2 != 0)
return (zfs_standard_error(zhp->zfs_hdl, err2, errbuf));
}
return (err || sdd.err);
stderr_out:
err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
err_out:
fsavl_destroy(fsavl);
fnvlist_free(fss);
fnvlist_free(sdd.snapholds);
if (sdd.cleanup_fd != -1)
VERIFY(0 == close(sdd.cleanup_fd));
return (err);
}
struct zfs_send {
zfs_handle_t *zhp;
const char *fromsnap;
const char *tosnap;
sendflags_t *flags;
snapfilter_cb_t *filter_func;
void *cb_arg;
nvlist_t **debugnvp;
};
static int
zfs_send_cb(int outfd, void *arg)
{
struct zfs_send *zs = arg;
return (zfs_send_cb_impl(zs->zhp, zs->fromsnap, zs->tosnap, zs->flags,
outfd, zs->filter_func, zs->cb_arg, zs->debugnvp));
}
int
zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
struct zfs_send arg = {
.zhp = zhp,
.fromsnap = fromsnap,
.tosnap = tosnap,
.flags = flags,
.filter_func = filter_func,
.cb_arg = cb_arg,
.debugnvp = debugnvp,
};
return (lzc_send_wrapper(zfs_send_cb, outfd, &arg));
}
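/*
* Illustrative caller sketch (not part of this file; the pool, dataset and
* snapshot names are hypothetical). A consumer holding a libzfs handle "hdl"
* could drive a recursive, property-preserving send of "pool/fs@snap" to
* stdout roughly as follows:
*
*	sendflags_t flags = { 0 };
*	flags.replicate = B_TRUE;
*	flags.props = B_TRUE;
*	flags.verbosity = 1;
*	zfs_handle_t *zhp = zfs_open(hdl, "pool/fs", ZFS_TYPE_FILESYSTEM);
*	if (zhp != NULL) {
*		int err = zfs_send(zhp, NULL, "snap", &flags,
*		    STDOUT_FILENO, NULL, NULL, NULL);
*		zfs_close(zhp);
*	}
*
* With fromsnap == NULL and fromorigin unset, a full (non-incremental)
* replication stream is generated, as described in the comment above
* zfs_send_cb_impl().
*/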
static zfs_handle_t *
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
{
char dirname[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(dirname, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(dirname, '@');
if (c != NULL)
*c = '\0';
return (zfs_open(hdl, dirname, ZFS_TYPE_DATASET));
}
/*
* Returns B_TRUE if earlier is an earlier snapshot in later's timeline; either
* an earlier snapshot in the same filesystem, or a snapshot before later's
* origin, or its origin's origin, etc.
*/
static boolean_t
snapshot_is_before(zfs_handle_t *earlier, zfs_handle_t *later)
{
boolean_t ret;
uint64_t later_txg =
(later->zfs_type == ZFS_TYPE_FILESYSTEM ||
later->zfs_type == ZFS_TYPE_VOLUME ?
UINT64_MAX : zfs_prop_get_int(later, ZFS_PROP_CREATETXG));
uint64_t earlier_txg = zfs_prop_get_int(earlier, ZFS_PROP_CREATETXG);
if (earlier_txg >= later_txg)
return (B_FALSE);
zfs_handle_t *earlier_dir = name_to_dir_handle(earlier->zfs_hdl,
earlier->zfs_name);
zfs_handle_t *later_dir = name_to_dir_handle(later->zfs_hdl,
later->zfs_name);
if (strcmp(earlier_dir->zfs_name, later_dir->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_TRUE);
}
char clonename[ZFS_MAX_DATASET_NAME_LEN];
if (zfs_prop_get(later_dir, ZFS_PROP_ORIGIN, clonename,
ZFS_MAX_DATASET_NAME_LEN, NULL, NULL, 0, B_TRUE) != 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_FALSE);
}
zfs_handle_t *origin = zfs_open(earlier->zfs_hdl, clonename,
ZFS_TYPE_DATASET);
uint64_t origin_txg = zfs_prop_get_int(origin, ZFS_PROP_CREATETXG);
/*
* If "earlier" is exactly the origin, then
* snapshot_is_before(earlier, origin) will return false (because
* they're the same).
*/
if (origin_txg == earlier_txg &&
strcmp(origin->zfs_name, earlier->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
zfs_close(origin);
return (B_TRUE);
}
zfs_close(earlier_dir);
zfs_close(later_dir);
ret = snapshot_is_before(earlier, origin);
zfs_close(origin);
return (ret);
}
/*
* The "zhp" argument is the handle of the dataset to send (typically a
* snapshot). The "from" argument is the full name of the snapshot or
* bookmark that is the incremental source.
*
* Pre-wrapped (cf. lzc_send_wrapper()).
*/
static int
zfs_send_one_cb_impl(zfs_handle_t *zhp, const char *from, int fd,
sendflags_t *flags, const char *redactbook)
{
int err;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *name = zhp->zfs_name;
pthread_t ptid;
progress_arg_t pa = { 0 };
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), name);
if (from != NULL && strchr(from, '@')) {
zfs_handle_t *from_zhp = zfs_open(hdl, from,
ZFS_TYPE_DATASET);
if (from_zhp == NULL)
return (-1);
if (!snapshot_is_before(from_zhp, zhp)) {
zfs_close(from_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
zfs_close(from_zhp);
}
if (redactbook != NULL) {
char bookname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *redact_snaps;
zfs_handle_t *book_zhp;
char *at, *pound;
int dsnamelen;
pound = strchr(redactbook, '#');
if (pound != NULL)
redactbook = pound + 1;
at = strchr(name, '@');
if (at == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot do a redacted send to a filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
dsnamelen = at - name;
if (snprintf(bookname, sizeof (bookname), "%.*s#%s",
dsnamelen, name, redactbook)
>= sizeof (bookname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid bookmark name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
book_zhp = zfs_open(hdl, bookname, ZFS_TYPE_BOOKMARK);
if (book_zhp == NULL)
return (-1);
if (nvlist_lookup_nvlist(book_zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
&redact_snaps) != 0 || redact_snaps == NULL) {
zfs_close(book_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a redaction bookmark"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
zfs_close(book_zhp);
}
/*
* Send fs properties
*/
if (flags->props || flags->holds || flags->backup) {
/*
* Note: the header generated by send_prelim_records()
* assumes that the incremental source is in the same
* filesystem/volume as the target (which is a requirement
* when doing "zfs send -R"). But that isn't always the
* case here (e.g. send from snap in origin, or send from
* bookmark). We pass from=NULL, which will omit this
* information from the prelim records; it isn't used
* when receiving this type of stream.
*/
err = send_prelim_records(zhp, NULL, fd, B_TRUE, B_FALSE,
flags->verbosity > 0, flags->dryrun, flags->raw,
flags->replicate, B_FALSE, flags->backup, flags->holds,
flags->props, flags->doall, NULL, NULL);
if (err != 0)
return (err);
}
/*
* Perform size estimate if verbose was specified.
*/
if (flags->verbosity != 0) {
err = estimate_size(zhp, from, fd, flags, 0, 0, 0, redactbook,
errbuf);
if (err != 0)
return (err);
}
if (flags->dryrun)
return (0);
/*
* If progress reporting is requested, spawn a new thread to poll
* ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
}
err = lzc_send_redacted(name, from, fd,
lzc_flags_from_sendflags(flags), redactbook);
if (flags->progress && send_progress_thread_exit(hdl, ptid))
return (-1);
if (err == 0 && (flags->props || flags->holds || flags->backup)) {
/* Write the final end record. */
err = send_conclusion_record(fd, NULL);
if (err != 0)
return (zfs_standard_error(hdl, err, errbuf));
}
if (err != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
case ESRCH:
if (lzc_exists(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
from);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"target is busy; if a filesystem, "
"it must not be mounted"));
return (zfs_error(hdl, EZFS_BUSY, errbuf));
case EDQUOT:
case EFAULT:
case EFBIG:
case EINVAL:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
- case ENOTSUP:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "large blocks detected but large_blocks feature "
- "is inactive; raw send unsupported"));
- return (zfs_error(hdl, EZFS_NOTSUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (err != 0);
}
struct zfs_send_one {
zfs_handle_t *zhp;
const char *from;
sendflags_t *flags;
const char *redactbook;
};
static int
zfs_send_one_cb(int fd, void *arg)
{
struct zfs_send_one *zso = arg;
return (zfs_send_one_cb_impl(zso->zhp, zso->from, fd, zso->flags,
zso->redactbook));
}
int
zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
const char *redactbook)
{
struct zfs_send_one zso = {
.zhp = zhp,
.from = from,
.flags = flags,
.redactbook = redactbook,
};
return (lzc_send_wrapper(zfs_send_one_cb, fd, &zso));
}
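/*
* Illustrative caller sketch (hypothetical names, not part of this file):
* send the snapshot open in "zhp" as an incremental from a bookmark,
* writing to an already-open descriptor "fd":
*
*	sendflags_t flags = { 0 };
*	flags.raw = B_TRUE;
*	int err = zfs_send_one(zhp, "pool/fs#mybookmark", fd, &flags, NULL);
*
* Here "from" is a full bookmark name, per the comment above
* zfs_send_one_cb_impl(); passing NULL instead requests a full send, and the
* final argument names a redaction bookmark (none in this sketch).
*/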
/*
* Routines specific to "zfs recv"
*/
static int
recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
boolean_t byteswap, zio_cksum_t *zc)
{
char *cp = buf;
int rv;
int len = ilen;
do {
rv = read(fd, cp, len);
cp += rv;
len -= rv;
} while (rv > 0);
if (rv < 0 || len != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to read from stream"));
return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
"cannot receive")));
}
if (zc) {
if (byteswap)
fletcher_4_incremental_byteswap(buf, ilen, zc);
else
fletcher_4_incremental_native(buf, ilen, zc);
}
return (0);
}
static int
recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
boolean_t byteswap, zio_cksum_t *zc)
{
char *buf;
int err;
buf = zfs_alloc(hdl, len);
if (len > hdl->libzfs_max_nvlist) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
free(buf);
return (ENOMEM);
}
err = recv_read(hdl, fd, buf, len, byteswap, zc);
if (err != 0) {
free(buf);
return (err);
}
err = nvlist_unpack(buf, len, nvp, 0);
free(buf);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (malformed nvlist)"));
return (EINVAL);
}
return (0);
}
/*
* Returns the grand origin (origin of origin of origin...) of a given handle.
* If this dataset is not a clone, it simply returns a copy of the original
* handle.
*/
static zfs_handle_t *
recv_open_grand_origin(zfs_handle_t *zhp)
{
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
zfs_handle_t *ozhp = zfs_handle_dup(zhp);
while (ozhp != NULL) {
if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin,
sizeof (origin), &src, NULL, 0, B_FALSE) != 0)
break;
(void) zfs_close(ozhp);
ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM);
}
return (ozhp);
}
static int
recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname)
{
int err;
zfs_handle_t *ozhp = NULL;
/*
* Attempt to rename the dataset. If it fails with EACCES, we have
* attempted to rename the dataset outside of its encryption root.
* Force the dataset to become an encryption root and try again.
*/
err = lzc_rename(name, newname);
if (err == EACCES) {
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = ENOENT;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = lzc_rename(name, newname);
}
out:
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
int baselen, char *newname, recvflags_t *flags)
{
static int seq;
int err;
prop_changelist_t *clp = NULL;
zfs_handle_t *zhp = NULL;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
if (clp == NULL) {
err = -1;
goto out;
}
err = changelist_prefix(clp);
if (err)
goto out;
if (tryname) {
(void) strcpy(newname, tryname);
if (flags->verbose) {
(void) printf("attempting rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, tryname);
} else {
err = ENOENT;
}
if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) {
seq++;
(void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN,
"%.*srecv-%u-%u", baselen, name, getpid(), seq);
if (flags->verbose) {
(void) printf("failed - trying rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, newname);
if (err && flags->verbose) {
(void) printf("failed (%u) - "
"will try again on next pass\n", errno);
}
err = EAGAIN;
} else if (flags->verbose) {
if (err == 0)
(void) printf("success\n");
else
(void) printf("failed (%u)\n", errno);
}
(void) changelist_postfix(clp);
out:
if (clp != NULL)
changelist_free(clp);
if (zhp != NULL)
zfs_close(zhp);
return (err);
}
static int
recv_promote(libzfs_handle_t *hdl, const char *fsname,
const char *origin_fsname, recvflags_t *flags)
{
int err;
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL, *ozhp = NULL;
if (flags->verbose)
(void) printf("promoting %s\n", fsname);
(void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
/*
* Attempt to promote the dataset. If it fails with EACCES, the
* promotion would cause this dataset to leave its encryption root.
* Force the origin to become an encryption root and try again.
*/
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (err == EACCES) {
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = -1;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
}
out:
if (zhp != NULL)
zfs_close(zhp);
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
char *newname, recvflags_t *flags)
{
int err = 0;
prop_changelist_t *clp;
zfs_handle_t *zhp;
boolean_t defer = B_FALSE;
int spa_version;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
zfs_type_t type = zfs_get_type(zhp);
if (type == ZFS_TYPE_SNAPSHOT &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS)
defer = B_TRUE;
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL)
return (-1);
err = changelist_prefix(clp);
if (err)
return (err);
if (flags->verbose)
(void) printf("attempting destroy %s\n", name);
if (type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, name);
err = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
err = lzc_destroy(name);
}
if (err == 0) {
if (flags->verbose)
(void) printf("success\n");
changelist_remove(clp, name);
}
(void) changelist_postfix(clp);
changelist_free(clp);
/*
* Deferred destroy might destroy the snapshot or only mark it to be
* destroyed later, and it returns success in either case.
*/
if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
ZFS_TYPE_SNAPSHOT))) {
err = recv_rename(hdl, name, NULL, baselen, newname, flags);
}
return (err);
}
typedef struct guid_to_name_data {
uint64_t guid;
boolean_t bookmark_ok;
char *name;
char *skip;
uint64_t *redact_snap_guids;
uint64_t num_redact_snaps;
} guid_to_name_data_t;
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
uint64_t *bmark_snaps;
uint_t bmark_num_snaps;
nvlist_t *nvl;
if (zhp->zfs_type != ZFS_TYPE_BOOKMARK)
return (B_FALSE);
nvl = fnvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
bmark_snaps = fnvlist_lookup_uint64_array(nvl, ZPROP_VALUE,
&bmark_num_snaps);
if (bmark_num_snaps != gtnd->num_redact_snaps)
return (B_FALSE);
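/*
 * The counts match; verify that every guid in the bookmark's redaction
 * list also appears in the requested list (order-insensitive).
 */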
int i = 0;
for (; i < bmark_num_snaps; i++) {
int j = 0;
for (; j < bmark_num_snaps; j++) {
if (bmark_snaps[i] == gtnd->redact_snap_guids[j])
break;
}
if (j == bmark_num_snaps)
break;
}
return (i == bmark_num_snaps);
}
static int
guid_to_name_cb(zfs_handle_t *zhp, void *arg)
{
guid_to_name_data_t *gtnd = arg;
const char *slash;
int err;
if (gtnd->skip != NULL &&
(slash = strrchr(zhp->zfs_name, '/')) != NULL &&
strcmp(slash + 1, gtnd->skip) == 0) {
zfs_close(zhp);
return (0);
}
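/*
 * Stop the iteration with EEXIST once a dataset or bookmark with the
 * requested guid (and matching redaction snapshots, if any were given)
 * is found; its name is copied into gtnd->name.
 */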
if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid &&
(gtnd->num_redact_snaps == -1 || redact_snaps_match(zhp, gtnd))) {
(void) strcpy(gtnd->name, zhp->zfs_name);
zfs_close(zhp);
return (EEXIST);
}
err = zfs_iter_children(zhp, guid_to_name_cb, gtnd);
if (err != EEXIST && gtnd->bookmark_ok)
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, gtnd);
zfs_close(zhp);
return (err);
}
/*
* Attempt to find the local dataset associated with this guid. In the case of
* multiple matches, we attempt to find the "best" match by searching
* progressively larger portions of the hierarchy. This allows one to send a
* tree of datasets individually and guarantee that we will find the source
* guid within that hierarchy, even if there are multiple matches elsewhere.
*
* If num_redact_snaps is not -1, we attempt to find a redaction bookmark with
* the specified number of redaction snapshots. If num_redact_snaps isn't 0 or
* -1, then redact_snap_guids will be an array of the guids of the snapshots the
* redaction bookmark was created with. If num_redact_snaps is -1, then we will
* attempt to find a snapshot or bookmark (if bookmark_ok is passed) with the
* given guid. Note that a redaction bookmark can be returned if
* num_redact_snaps == -1.
*/
static int
guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name)
{
char pname[ZFS_MAX_DATASET_NAME_LEN];
guid_to_name_data_t gtnd;
gtnd.guid = guid;
gtnd.bookmark_ok = bookmark_ok;
gtnd.name = name;
gtnd.skip = NULL;
gtnd.redact_snap_guids = redact_snap_guids;
gtnd.num_redact_snaps = num_redact_snaps;
/*
* Search progressively larger portions of the hierarchy, starting
* with the filesystem specified by 'parent'. This will
* select the "most local" version of the origin snapshot in the case
* that there are multiple matching snapshots in the system.
*/
(void) strlcpy(pname, parent, sizeof (pname));
char *cp = strrchr(pname, '@');
if (cp == NULL)
cp = strchr(pname, '\0');
for (; cp != NULL; cp = strrchr(pname, '/')) {
/* Chop off the last component and open the parent */
*cp = '\0';
zfs_handle_t *zhp = make_dataset_handle(hdl, pname);
if (zhp == NULL)
continue;
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
if (err != EEXIST)
err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
if (err != EEXIST && bookmark_ok)
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, &gtnd);
zfs_close(zhp);
if (err == EEXIST)
return (0);
/*
* Remember the last portion of the dataset so we skip it next
* time through (as we've already searched that portion of the
* hierarchy).
*/
gtnd.skip = strrchr(pname, '/') + 1;
}
return (ENOENT);
}
static int
guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
boolean_t bookmark_ok, char *name)
{
return (guid_to_name_redact_snaps(hdl, parent, guid, bookmark_ok, NULL,
-1, name));
}
/*
 * Compare creation times: return -1 if guid1 was created before guid2 (or
 * if looking up either snapshot fails), 0 if they were created in the same
 * txg (or if guid2 is 0), and +1 if guid1 was created after guid2 (or if
 * guid1 is 0).
 */
static int
created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
uint64_t guid1, uint64_t guid2)
{
nvlist_t *nvfs;
char *fsname = NULL, *snapname = NULL;
char buf[ZFS_MAX_DATASET_NAME_LEN];
int rv;
zfs_handle_t *guid1hdl, *guid2hdl;
uint64_t create1, create2;
if (guid2 == 0)
return (0);
if (guid1 == 0)
return (1);
nvfs = fsavl_find(avl, guid1, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid1hdl == NULL)
return (-1);
nvfs = fsavl_find(avl, guid2, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid2hdl == NULL) {
zfs_close(guid1hdl);
return (-1);
}
create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
if (create1 < create2)
rv = -1;
else if (create1 > create2)
rv = +1;
else
rv = 0;
zfs_close(guid1hdl);
zfs_close(guid2hdl);
return (rv);
}
/*
* This function reestablishes the hierarchy of encryption roots after a
* recursive incremental receive has completed. This must be done after the
* second call to recv_incremental_replication() has renamed and promoted all
* sent datasets to their final locations in the dataset hierarchy.
*/
static int
recv_fix_encryption_hierarchy(libzfs_handle_t *hdl, const char *top_zfs,
nvlist_t *stream_nv)
{
int err;
nvpair_t *fselem = NULL;
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) {
zfs_handle_t *zhp = NULL;
uint64_t crypt;
nvlist_t *snaps, *props, *stream_nvfs = NULL;
nvpair_t *snapel = NULL;
boolean_t is_encroot, is_clone, stream_encroot;
char *cp;
char *stream_keylocation = NULL;
char keylocation[MAXNAMELEN];
char fsname[ZFS_MAX_DATASET_NAME_LEN];
keylocation[0] = '\0';
stream_nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(stream_nvfs, "snaps");
props = fnvlist_lookup_nvlist(stream_nvfs, "props");
stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
/* find a snapshot from the stream that exists locally */
err = ENOENT;
while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) {
uint64_t guid;
guid = fnvpair_value_uint64(snapel);
err = guid_to_name(hdl, top_zfs, guid, B_FALSE,
fsname);
if (err == 0)
break;
}
if (err != 0)
continue;
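/* guid_to_name() returned a snapshot; strip it back to its filesystem. */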
cp = strchr(fsname, '@');
if (cp != NULL)
*cp = '\0';
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = ENOENT;
goto error;
}
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
(void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
/* we don't need to do anything for unencrypted datasets */
if (crypt == ZIO_CRYPT_OFF) {
zfs_close(zhp);
continue;
}
/*
* If the dataset is flagged as an encryption root, was not
* received as a clone and is not currently an encryption root,
* force it to become one. Fixup the keylocation if necessary.
*/
if (stream_encroot) {
if (!is_clone && !is_encroot) {
err = lzc_change_key(fsname,
DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
stream_keylocation = fnvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
/*
* Refresh the properties in case the call to
* lzc_change_key() changed the value.
*/
zfs_refresh_properties(zhp);
err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
keylocation, sizeof (keylocation), NULL, NULL,
0, B_TRUE);
if (err != 0) {
zfs_close(zhp);
goto error;
}
if (strcmp(keylocation, stream_keylocation) != 0) {
err = zfs_prop_set(zhp,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
stream_keylocation);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
}
/*
* If the dataset is not flagged as an encryption root and is
* currently an encryption root, force it to inherit from its
* parent. The root of a raw send should never be
* force-inherited.
*/
if (!stream_encroot && is_encroot &&
strcmp(top_zfs, fsname) != 0) {
err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT,
NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
zfs_close(zhp);
}
return (0);
error:
return (err);
}
static int
recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
nvlist_t *renamed)
{
nvlist_t *local_nv, *deleted = NULL;
avl_tree_t *local_avl;
nvpair_t *fselem, *nextfselem;
char *fromsnap;
char newname[ZFS_MAX_DATASET_NAME_LEN];
char guidname[32];
int error;
boolean_t needagain, progress, recursive;
char *s1, *s2;
fromsnap = fnvlist_lookup_string(stream_nv, "fromsnap");
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
if (flags->dryrun)
return (0);
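/*
 * Each pass gathers the local hierarchy and applies any destroys,
 * promotes, and renames implied by the stream. Renames that had to
 * fall back to temporary names force another pass to fix them up.
 */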
again:
needagain = progress = B_FALSE;
deleted = fnvlist_alloc();
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
return (error);
/*
* Process deletes and renames
*/
for (fselem = nvlist_next_nvpair(local_nv, NULL);
fselem; fselem = nextfselem) {
nvlist_t *nvfs, *snaps;
nvlist_t *stream_nvfs = NULL;
nvpair_t *snapelem, *nextsnapelem;
uint64_t fromguid = 0;
uint64_t originguid = 0;
uint64_t stream_originguid = 0;
uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
char *fsname, *stream_fsname;
nextfselem = nvlist_next_nvpair(local_nv, fselem);
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
fsname = fnvlist_lookup_string(nvfs, "name");
parent_fromsnap_guid = fnvlist_lookup_uint64(nvfs,
"parentfromsnap");
(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
/*
* First find the stream's fs, so we can check for
* a different origin (due to "zfs promote")
*/
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
uint64_t thisguid;
thisguid = fnvpair_value_uint64(snapelem);
stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
if (stream_nvfs != NULL)
break;
}
/* check for promote */
(void) nvlist_lookup_uint64(stream_nvfs, "origin",
&stream_originguid);
if (stream_nvfs && originguid != stream_originguid) {
switch (created_before(hdl, local_avl,
stream_originguid, originguid)) {
case 1: {
/* promote it! */
nvlist_t *origin_nvfs;
char *origin_fsname;
origin_nvfs = fsavl_find(local_avl, originguid,
NULL);
origin_fsname = fnvlist_lookup_string(
origin_nvfs, "name");
error = recv_promote(hdl, fsname, origin_fsname,
flags);
if (error == 0)
progress = B_TRUE;
break;
}
default:
break;
case -1:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
return (-1);
}
/*
* We had/have the wrong origin, therefore our
* list of snapshots is wrong. Need to handle
* them on the next pass.
*/
needagain = B_TRUE;
continue;
}
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nextsnapelem) {
uint64_t thisguid;
char *stream_snapname;
nvlist_t *found, *props;
nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
thisguid = fnvpair_value_uint64(snapelem);
found = fsavl_find(stream_avl, thisguid,
&stream_snapname);
/* check for delete */
if (found == NULL) {
char name[ZFS_MAX_DATASET_NAME_LEN];
if (!flags->force)
continue;
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
error = recv_destroy(hdl, name,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)thisguid);
nvlist_add_boolean(deleted, guidname);
continue;
}
stream_nvfs = found;
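/*
 * Re-apply the snapshot properties from the stream to the local
 * snapshot, marking them as received.
 */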
if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
&props) && 0 == nvlist_lookup_nvlist(props,
stream_snapname, &props)) {
zfs_cmd_t zc = {"\0"};
zc.zc_cookie = B_TRUE; /* received */
(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
"%s@%s", fsname, nvpair_name(snapelem));
zcmd_write_src_nvlist(hdl, &zc, props);
(void) zfs_ioctl(hdl,
ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
/* check for different snapname */
if (strcmp(nvpair_name(snapelem),
stream_snapname) != 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
char tryname[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
(void) snprintf(tryname, sizeof (tryname), "%s@%s",
fsname, stream_snapname);
error = recv_rename(hdl, name, tryname,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
if (strcmp(stream_snapname, fromsnap) == 0)
fromguid = thisguid;
}
/* check for delete */
if (stream_nvfs == NULL) {
if (!flags->force)
continue;
error = recv_destroy(hdl, fsname, strlen(tofs)+1,
newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
nvlist_add_boolean(deleted, guidname);
continue;
}
if (fromguid == 0) {
if (flags->verbose) {
(void) printf("local fs %s does not have "
"fromsnap (%s in stream); must have "
"been deleted locally; ignoring\n",
fsname, fromsnap);
}
continue;
}
stream_fsname = fnvlist_lookup_string(stream_nvfs, "name");
stream_parent_fromsnap_guid = fnvlist_lookup_uint64(
stream_nvfs, "parentfromsnap");
s1 = strrchr(fsname, '/');
s2 = strrchr(stream_fsname, '/');
/*
* Check if we're going to rename based on parent guid change
* and the current parent guid was also deleted. If it was then
* rename will fail and is likely unneeded, so avoid this and
* force an early retry to determine the new
* parent_fromsnap_guid.
*/
if (stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
if (nvlist_exists(deleted, guidname)) {
progress = B_TRUE;
needagain = B_TRUE;
goto doagain;
}
}
/*
* Check for rename. If the exact receive path is specified, it
* does not count as a rename, but we still need to check the
* datasets beneath it.
*/
if ((stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
((flags->isprefix || strcmp(tofs, fsname) != 0) &&
(s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
nvlist_t *parent;
char tryname[ZFS_MAX_DATASET_NAME_LEN];
parent = fsavl_find(local_avl,
stream_parent_fromsnap_guid, NULL);
/*
* NB: parent might not be found if we used the
* tosnap for stream_parent_fromsnap_guid,
* because the parent is a newly-created fs;
* we'll be able to rename it after we recv the
* new fs.
*/
if (parent != NULL) {
char *pname;
pname = fnvlist_lookup_string(parent, "name");
(void) snprintf(tryname, sizeof (tryname),
"%s%s", pname, strrchr(stream_fsname, '/'));
} else {
tryname[0] = '\0';
if (flags->verbose) {
(void) printf("local fs %s new parent "
"not found\n", fsname);
}
}
newname[0] = '\0';
error = recv_rename(hdl, fsname, tryname,
strlen(tofs)+1, newname, flags);
if (renamed != NULL && newname[0] != '\0') {
fnvlist_add_boolean(renamed, newname);
}
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
}
doagain:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
fnvlist_free(deleted);
if (needagain && progress) {
/* do another pass to fix up temporary names */
if (flags->verbose)
(void) printf("another pass:\n");
goto again;
}
return (needagain || error != 0);
}
static int
zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
char **top_zfs, nvlist_t *cmdprops)
{
nvlist_t *stream_nv = NULL;
avl_tree_t *stream_avl = NULL;
char *fromsnap = NULL;
char *sendsnap = NULL;
char *cp;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
char sendfs[ZFS_MAX_DATASET_NAME_LEN];
char errbuf[ERRBUFLEN];
dmu_replay_record_t drre;
int error;
boolean_t anyerr = B_FALSE;
boolean_t softerr = B_FALSE;
boolean_t recursive, raw;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
assert(drr->drr_type == DRR_BEGIN);
assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
/*
* Read in the nvlist from the stream.
*/
if (drr->drr_payloadlen != 0) {
error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
&stream_nv, flags->byteswap, zc);
if (error) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
}
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0);
if (recursive && strchr(destname, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot stream"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
/*
* Read in the end record and verify checksum.
*/
if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
flags->byteswap, NULL)))
goto out;
if (flags->byteswap) {
drre.drr_type = BSWAP_32(drre.drr_type);
drre.drr_u.drr_end.drr_checksum.zc_word[0] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
drre.drr_u.drr_end.drr_checksum.zc_word[1] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
drre.drr_u.drr_end.drr_checksum.zc_word[2] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
drre.drr_u.drr_end.drr_checksum.zc_word[3] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
}
if (drre.drr_type != DRR_END) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incorrect header checksum"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
if (drr->drr_payloadlen != 0) {
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"couldn't allocate avl tree"));
error = zfs_error(hdl, EZFS_NOMEM, errbuf);
goto out;
}
if (fromsnap != NULL && recursive) {
nvlist_t *renamed = NULL;
nvpair_t *pair = NULL;
(void) strlcpy(tofs, destname, sizeof (tofs));
if (flags->isprefix) {
struct drr_begin *drrb = &drr->drr_u.drr_begin;
int i;
if (flags->istail) {
cp = strrchr(drrb->drr_toname, '/');
if (cp == NULL) {
(void) strlcat(tofs, "/",
sizeof (tofs));
i = 0;
} else {
i = (cp - drrb->drr_toname);
}
} else {
i = strcspn(drrb->drr_toname, "/@");
}
/* zfs_receive_one() will create_parents() */
(void) strlcat(tofs, &drrb->drr_toname[i],
sizeof (tofs));
*strchr(tofs, '@') = '\0';
}
if (!flags->dryrun && !flags->nomount) {
renamed = fnvlist_alloc();
}
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, renamed);
/* Unmount renamed filesystems before receiving. */
while ((pair = nvlist_next_nvpair(renamed,
pair)) != NULL) {
zfs_handle_t *zhp;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, nvpair_name(pair),
ZFS_TYPE_FILESYSTEM);
if (zhp != NULL) {
clp = changelist_gather(zhp,
ZFS_PROP_MOUNTPOINT, 0,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp != NULL) {
softerr |=
changelist_prefix(clp);
changelist_free(clp);
}
}
}
fnvlist_free(renamed);
}
}
/*
* Get the fs specified by the first path in the stream (the top level
* specified by 'zfs send') and pass it to each invocation of
* zfs_receive_one().
*/
(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
sizeof (sendfs));
if ((cp = strchr(sendfs, '@')) != NULL) {
*cp = '\0';
/*
* Find the "sendsnap", the final snapshot in a replication
* stream. zfs_receive_one() handles certain errors
* differently, depending on if the contained stream is the
* last one or not.
*/
sendsnap = (cp + 1);
}
/* Finally, receive each contained stream */
do {
/*
* we should figure out if it has a recoverable
* error, in which case do a recv_skip() and drive on.
* Note, if we fail due to already having this guid,
* zfs_receive_one() will take care of it (i.e.,
* recv_skip() and return 0).
*/
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
sendfs, stream_nv, stream_avl, top_zfs, sendsnap, cmdprops);
if (error == ENODATA) {
error = 0;
break;
}
anyerr |= error;
} while (error == 0);
if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) {
/*
* Now that we have the fs's they sent us, try the
* renames again.
*/
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, NULL);
}
if (raw && softerr == 0 && *top_zfs != NULL) {
softerr = recv_fix_encryption_hierarchy(hdl, *top_zfs,
stream_nv);
}
out:
fsavl_destroy(stream_avl);
fnvlist_free(stream_nv);
if (softerr)
error = -2;
if (anyerr)
error = -1;
return (error);
}
static void
trunc_prop_errs(int truncated)
{
ASSERT(truncated != 0);
if (truncated == 1)
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"1 more property could not be set\n"));
else
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"%d more properties could not be set\n"), truncated);
}
static int
recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
{
dmu_replay_record_t *drr;
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
uint64_t payload_size;
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* XXX would be great to use lseek if possible... */
drr = buf;
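/*
 * Read and discard records until the matching DRR_END record (or an
 * unrecoverable error) is reached.
 */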
while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
byteswap, NULL) == 0) {
if (byteswap)
drr->drr_type = BSWAP_32(drr->drr_type);
switch (drr->drr_type) {
case DRR_BEGIN:
if (drr->drr_payloadlen != 0) {
(void) recv_read(hdl, fd, buf,
drr->drr_payloadlen, B_FALSE, NULL);
}
break;
case DRR_END:
free(buf);
return (0);
case DRR_OBJECT:
if (byteswap) {
drr->drr_u.drr_object.drr_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_bonuslen);
drr->drr_u.drr_object.drr_raw_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_raw_bonuslen);
}
payload_size =
DRR_OBJECT_PAYLOAD_SIZE(&drr->drr_u.drr_object);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE:
if (byteswap) {
drr->drr_u.drr_write.drr_logical_size =
BSWAP_64(
drr->drr_u.drr_write.drr_logical_size);
drr->drr_u.drr_write.drr_compressed_size =
BSWAP_64(
drr->drr_u.drr_write.drr_compressed_size);
}
payload_size =
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
assert(payload_size <= SPA_MAXBLOCKSIZE);
(void) recv_read(hdl, fd, buf,
payload_size, B_FALSE, NULL);
break;
case DRR_SPILL:
if (byteswap) {
drr->drr_u.drr_spill.drr_length =
BSWAP_64(drr->drr_u.drr_spill.drr_length);
drr->drr_u.drr_spill.drr_compressed_size =
BSWAP_64(drr->drr_u.drr_spill.
drr_compressed_size);
}
payload_size =
DRR_SPILL_PAYLOAD_SIZE(&drr->drr_u.drr_spill);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE_EMBEDDED:
if (byteswap) {
drr->drr_u.drr_write_embedded.drr_psize =
BSWAP_32(drr->drr_u.drr_write_embedded.
drr_psize);
}
(void) recv_read(hdl, fd, buf,
P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize,
8), B_FALSE, NULL);
break;
case DRR_OBJECT_RANGE:
case DRR_WRITE_BYREF:
case DRR_FREEOBJECTS:
case DRR_FREE:
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type"));
free(buf);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
}
free(buf);
return (-1);
}
static void
recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap,
boolean_t resumable, boolean_t checksum)
{
char target_fs[ZFS_MAX_DATASET_NAME_LEN];
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, (checksum ?
"checksum mismatch" : "incomplete stream")));
if (!resumable)
return;
(void) strlcpy(target_fs, target_snap, sizeof (target_fs));
*strchr(target_fs, '@') = '\0';
zfs_handle_t *zhp = zfs_open(hdl, target_fs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return;
char token_buf[ZFS_MAXPROPLEN];
int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf),
NULL, NULL, 0, B_TRUE);
if (error == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"checksum mismatch or incomplete stream.\n"
"Partially received snapshot is saved.\n"
"A resuming stream can be generated on the sending "
"system by running:\n"
" zfs send -t %s"),
token_buf);
}
zfs_close(zhp);
}
/*
* Prepare a new nvlist of properties that are to override (-o) or be excluded
* (-x) from the received dataset
* recvprops: received properties from the send stream
* cmdprops: raw input properties from command line
* origprops: properties, both locally-set and received, currently set on the
* target dataset if it exists, NULL otherwise.
* oxprops: valid output override (-o) and excluded (-x) properties
*/
static int
zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type,
char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs,
boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops,
nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out,
uint_t *wkeylen_out, const char *errbuf)
{
nvpair_t *nvp;
nvlist_t *oprops, *voprops;
zfs_handle_t *zhp = NULL;
zpool_handle_t *zpool_hdl = NULL;
char *cp;
int ret = 0;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
if (nvlist_empty(cmdprops))
return (0); /* No properties to override or exclude */
*oxprops = fnvlist_alloc();
oprops = fnvlist_alloc();
strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN);
/*
* Get our dataset handle. The target dataset may not exist yet.
*/
if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) {
zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET);
if (zhp == NULL) {
ret = -1;
goto error;
}
}
/* open the zpool handle */
cp = strchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
zpool_hdl = zpool_open(hdl, namebuf);
if (zpool_hdl == NULL) {
ret = -1;
goto error;
}
/* restore namebuf to match fsname for later use */
if (cp != NULL)
*cp = '/';
/*
* first iteration: process excluded (-x) properties now and gather
* added (-o) properties to be later processed by zfs_valid_proplist()
*/
nvp = NULL;
while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
/*
 * If we don't normalize "aliased" names (e.g. compress=) to their
 * canonical names (e.g. compression) here, then setting or excluding
 * them does not work as intended.
 *
 * User-defined properties have no such mapping, hence the conditional
 * below.
 */
const char *newname = name;
if (prop >= ZFS_PROP_TYPE)
newname = zfs_prop_to_name(prop);
/* "origin" is processed separately, don't handle it here */
if (prop == ZFS_PROP_ORIGIN)
continue;
/* raw streams can't override encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set or excluded for raw streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/* incremental streams can only exclude encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && !newfs &&
nvpair_type(nvp) != DATA_TYPE_BOOLEAN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set for incremental streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
switch (nvpair_type(nvp)) {
case DATA_TYPE_BOOLEAN: /* -x property */
/*
* DATA_TYPE_BOOLEAN is the way we're asked to "exclude"
* a property: this is done by forcing an explicit
* inherit on the destination so the effective value is
* not the one we received from the send stream.
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"Warning: %s: property '%s' does not "
"apply to datasets of this type\n"),
fsname, name);
continue;
}
/*
* We do this only if the property is not already
* locally-set, in which case its value will take
* priority over the received anyway.
*/
if (nvlist_exists(origprops, newname)) {
nvlist_t *attrs;
char *source = NULL;
attrs = fnvlist_lookup_nvlist(origprops,
newname);
if (nvlist_lookup_string(attrs,
ZPROP_SOURCE, &source) == 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
}
/*
 * We can't force an explicit inherit on non-inheritable
 * properties: if we're asked to exclude such a property we
 * simply remove it from the "recvprops" input nvlist.
 */
if (!zfs_prop_user(name) && /* can be inherited too */
!zfs_prop_inheritable(prop) &&
nvlist_exists(recvprops, newname))
fnvlist_remove(recvprops, newname);
else
fnvlist_add_boolean(*oxprops, newname);
break;
case DATA_TYPE_STRING: /* -o property=value */
/*
* we're trying to override a property that does not
* make sense for this type of dataset, but we don't
* want to fail if the receive is recursive: this comes
* in handy when the send stream contains, for
* instance, a child ZVOL and we're trying to receive
* it with "-o atime=on"
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
if (recursive)
continue;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' does not apply to datasets "
"of this type"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
fnvlist_add_string(oprops, newname,
fnvpair_value_string(nvp));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be a string or boolean"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
if (toplevel) {
/* convert override string properties to native values */
if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET,
oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) {
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* zfs_crypto_create() requires the parent name. Get it
* by truncating the fsname copy stored in namebuf.
*/
cp = strrchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
if (!raw && zfs_crypto_create(hdl, namebuf, voprops, NULL,
B_FALSE, wkeydata_out, wkeylen_out) != 0) {
fnvlist_free(voprops);
ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto error;
}
/* second pass: process "-o" properties */
fnvlist_merge(*oxprops, voprops);
fnvlist_free(voprops);
} else {
/* override props on child dataset are inherited */
nvp = NULL;
while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
fnvlist_add_boolean(*oxprops, name);
}
}
error:
if (zhp != NULL)
zfs_close(zhp);
if (zpool_hdl != NULL)
zpool_close(zpool_hdl);
fnvlist_free(oprops);
return (ret);
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
*/
static int
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
struct timespec begin_time;
int ioctl_err, ioctl_errno, err;
char *cp;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
char errbuf[ERRBUFLEN];
const char *chopprefix;
boolean_t newfs = B_FALSE;
boolean_t stream_wantsnewfs, stream_resumingnewfs;
boolean_t newprops = B_FALSE;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
uint64_t parent_snapguid = 0;
prop_changelist_t *clp = NULL;
nvlist_t *snapprops_nvlist = NULL;
nvlist_t *snapholds_nvlist = NULL;
zprop_errflags_t prop_errflags;
nvlist_t *prop_errors = NULL;
boolean_t recursive;
char *snapname = NULL;
char destsnap[MAXPATHLEN * 2];
char origin[MAXNAMELEN] = {0};
char name[MAXPATHLEN];
char tmp_keylocation[MAXNAMELEN] = {0};
nvlist_t *rcvprops = NULL; /* props received from the send stream */
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
nvlist_t *origprops = NULL; /* original props (if destination exists) */
zfs_type_t type = ZFS_TYPE_INVALID;
boolean_t toplevel = B_FALSE;
boolean_t zoned = B_FALSE;
boolean_t hastoken = B_FALSE;
boolean_t redacted;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
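/* Fall back to CLOCK_MONOTONIC where CLOCK_MONOTONIC_RAW is unavailable. */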
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif
clock_gettime(CLOCK_MONOTONIC_RAW, &begin_time);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
/* Did the user request holds be skipped via zfs recv -k? */
boolean_t holds = flags->holds && !flags->skipholds;
if (stream_avl != NULL) {
char *keylocation = NULL;
nvlist_t *lookup = NULL;
nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
&snapname);
(void) nvlist_lookup_uint64(fs, "parentfromsnap",
&parent_snapguid);
err = nvlist_lookup_nvlist(fs, "props", &rcvprops);
if (err) {
rcvprops = fnvlist_alloc();
newprops = B_TRUE;
}
/*
* The keylocation property may only be set on encryption roots,
* but this dataset might not become an encryption root until
* recv_fix_encryption_hierarchy() is called. That function
* will fixup the keylocation anyway, so we temporarily unset
* the keylocation for now to avoid any errors from the receive
* ioctl.
*/
err = nvlist_lookup_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (err == 0) {
strcpy(tmp_keylocation, keylocation);
(void) nvlist_remove_all(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
}
if (flags->canmountoff) {
fnvlist_add_uint64(rcvprops,
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0);
} else if (newprops) { /* nothing in rcvprops, eliminate it */
fnvlist_free(rcvprops);
rcvprops = NULL;
newprops = B_FALSE;
}
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
snapprops_nvlist = fnvlist_lookup_nvlist(lookup,
snapname);
}
if (holds) {
if (0 == nvlist_lookup_nvlist(fs, "snapholds",
&lookup)) {
snapholds_nvlist = fnvlist_lookup_nvlist(
lookup, snapname);
}
}
}
cp = NULL;
/*
* Determine how much of the snapshot name stored in the stream
* we are going to tack on to the name they specified on the
* command line, and how much we are going to chop off.
*
* If they specified a snapshot, chop the entire name stored in
* the stream.
*/
if (flags->istail) {
/*
* A filesystem was specified with -e. We want to tack on only
* the tail of the sent snapshot path.
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -e"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strrchr(sendfs, '/');
if (chopprefix == NULL) {
/*
* The tail is the poolname, so we need to
* prepend a path separator.
*/
int len = strlen(drrb->drr_toname);
cp = malloc(len + 2);
cp[0] = '/';
(void) strcpy(&cp[1], drrb->drr_toname);
chopprefix = cp;
} else {
chopprefix = drrb->drr_toname + (chopprefix - sendfs);
}
} else if (flags->isprefix) {
/*
* A filesystem was specified with -d. We want to tack on
* everything but the first element of the sent snapshot path
* (all but the pool name).
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -d"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strchr(drrb->drr_toname, '/');
if (chopprefix == NULL)
chopprefix = strchr(drrb->drr_toname, '@');
} else if (strchr(tosnap, '@') == NULL) {
/*
* If a filesystem was specified without -d or -e, we want to
* tack on everything after the fs specified by 'zfs send'.
*/
chopprefix = drrb->drr_toname + strlen(sendfs);
} else {
/* A snapshot was specified as an exact path (no -d or -e). */
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot "
"stream"));
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
}
ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL);
ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) ||
strchr(sendfs, '/') == NULL);
ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
chopprefix[0] == '\0');
/*
* Determine name of destination snapshot.
*/
(void) strlcpy(destsnap, tosnap, sizeof (destsnap));
(void) strlcat(destsnap, chopprefix, sizeof (destsnap));
free(cp);
if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) {
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
/*
* Determine the name of the origin snapshot.
*/
if (originsnap) {
(void) strlcpy(origin, originsnap, sizeof (origin));
if (flags->verbose)
(void) printf("using provided clone origin %s\n",
origin);
} else if (drrb->drr_flags & DRR_FLAG_CLONE) {
if (guid_to_name(hdl, destsnap,
drrb->drr_fromguid, B_FALSE, origin) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local origin for clone %s does not exist"),
destsnap);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
if (flags->verbose)
(void) printf("found clone origin %s\n", origin);
}
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_DEDUP)) {
(void) fprintf(stderr,
gettext("ERROR: \"zfs receive\" no longer supports "
"deduplicated send streams. Use\n"
"the \"zstream redup\" command to convert this stream "
"to a regular,\n"
"non-deduplicated stream.\n"));
err = zfs_error(hdl, EZFS_NOTSUP, errbuf);
goto out;
}
boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RESUMING;
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW;
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA;
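/*
 * A stream "wants a new fs" when it is a full send or a clone and is
 * not a resume; track resumed streams of that kind separately since
 * they need some of the same handling below.
 */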
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;
stream_resumingnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && resuming;
if (stream_wantsnewfs) {
/*
* if the parent fs does not exist, look for it based on
* the parent snap GUID
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive new filesystem stream"));
(void) strcpy(name, destsnap);
cp = strrchr(name, '/');
if (cp)
*cp = '\0';
if (cp &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char suffix[ZFS_MAX_DATASET_NAME_LEN];
(void) strcpy(suffix, strrchr(destsnap, '/'));
if (guid_to_name(hdl, name, parent_snapguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strcat(destsnap, suffix);
}
}
} else {
/*
* If the fs does not exist, look for it based on the
* fromsnap GUID.
*/
if (resuming) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive resume stream"));
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive incremental stream"));
}
(void) strcpy(name, destsnap);
*strchr(name, '@') = '\0';
/*
* If the exact receive path was specified and this is the
* topmost path in the stream, then if the fs does not exist we
* should look no further.
*/
if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char snap[ZFS_MAX_DATASET_NAME_LEN];
(void) strcpy(snap, strchr(destsnap, '@'));
if (guid_to_name(hdl, name, drrb->drr_fromguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strcat(destsnap, snap);
}
}
}
(void) strcpy(name, destsnap);
*strchr(name, '@') = '\0';
redacted = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_REDACTED;
if (flags->heal) {
if (flags->isprefix || flags->istail || flags->force ||
flags->canmountoff || flags->resumable || flags->nomount ||
flags->skipholds) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective recv can not be used when combined with"
" this flag"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
uint64_t guid =
get_snap_guid(hdl, name, strchr(destsnap, '@') + 1);
if (guid == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective recv must specify an existing snapshot"
" to heal"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
} else if (guid != drrb->drr_toguid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local snapshot doesn't match the snapshot"
" in the provided stream"));
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
} else if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL;
boolean_t encrypted;
(void) strcpy(zc.zc_name, name);
/*
* Destination fs exists. It must be one of these cases:
* - an incremental send stream
* - the stream specifies a new fs (full stream or clone)
* and they want us to blow away the existing fs (and
* have therefore specified -F and removed any snapshots)
* - we are resuming a failed receive.
*/
if (stream_wantsnewfs) {
boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL;
if (!flags->force) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' exists\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (zfs_ioctl(hdl, ZFS_IOC_SNAPSHOT_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has snapshots (eg. %s)\n"
"must destroy them to overwrite it"),
zc.zc_name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume && strrchr(name, '/') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s is the root dataset\n"
"cannot overwrite with a ZVOL"),
name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume &&
zfs_ioctl(hdl, ZFS_IOC_DATASET_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has children (eg. %s)\n"
"cannot overwrite with a ZVOL"),
zc.zc_name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
}
if ((zhp = zfs_open(hdl, name,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
err = -1;
goto out;
}
if (stream_wantsnewfs &&
zhp->zfs_dmustats.dds_origin[0]) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' is a clone\n"
"must destroy it to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
/*
* Raw sends cannot be performed as an incremental on top
* of existing unencrypted datasets. zfs recv -F can't be
* used to blow away an existing encrypted filesystem. This
* is because it would require the dsl dir to point to the
* new key (or lack of a key) and the old key at the same
* time. The -F flag may still be used for deleting
* intermediate snapshots that would otherwise prevent the
* receive from working.
*/
encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) !=
ZIO_CRYPT_OFF;
if (!stream_wantsnewfs && !encrypted && raw) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot perform raw receive on top of "
"existing unencrypted dataset"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (stream_wantsnewfs && flags->force &&
((raw && !encrypted) || encrypted)) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive -F cannot be used to destroy an "
"encrypted filesystem or overwrite an "
"unencrypted one with an encrypted one"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
(stream_wantsnewfs || stream_resumingnewfs)) {
/* We can't do online recv in this case */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->forceunmount ? MS_FORCE : 0);
if (clp == NULL) {
zfs_close(zhp);
err = -1;
goto out;
}
if (changelist_prefix(clp) != 0) {
changelist_free(clp);
zfs_close(zhp);
err = -1;
goto out;
}
}
/*
* If we are resuming a newfs, set newfs here so that we will
* mount it if the recv succeeds this time. We can tell
* that it was a newfs on the first recv because the fs
* itself will be inconsistent (if the fs existed when we
* did the first recv, we would have received it into
* .../%recv).
*/
if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT))
newfs = B_TRUE;
/* we want to know if we're zoned when validating -o|-x props */
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
/* may need this info later; get it now while we have zhp around */
if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
NULL, NULL, 0, B_TRUE) == 0)
hastoken = B_TRUE;
/* gather existing properties on destination */
origprops = fnvlist_alloc();
fnvlist_merge(origprops, zhp->zfs_props);
fnvlist_merge(origprops, zhp->zfs_user_props);
zfs_close(zhp);
} else {
zfs_handle_t *zhp;
/*
* Destination filesystem does not exist. Therefore we better
* be creating a new filesystem (either from a full backup, or
* a clone). It would therefore be invalid if the user
* specified only the pool name (i.e. if the destination name
* contained no slash character).
*/
cp = strrchr(name, '/');
if (!stream_wantsnewfs || cp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' does not exist"), name);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
/*
* Trim off the final dataset component so we perform the
* recvbackup ioctl to the filesystem's parent.
*/
*cp = '\0';
if (flags->isprefix && !flags->istail && !flags->dryrun &&
create_parents(hdl, destsnap, strlen(tosnap)) != 0) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
/* validate parent */
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
zfs_close(zhp);
goto out;
}
zfs_close(zhp);
newfs = B_TRUE;
*cp = '/';
}
if (flags->verbose) {
(void) printf("%s %s%s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
flags->heal ? " corrective" : "",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);
}
/*
* If this is the top-level dataset, record it so we can use it
* for recursive operations later.
*/
if (top_zfs != NULL &&
(*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) {
toplevel = B_TRUE;
if (*top_zfs == NULL)
*top_zfs = zfs_strdup(hdl, name);
}
if (drrb->drr_type == DMU_OST_ZVOL) {
type = ZFS_TYPE_VOLUME;
} else if (drrb->drr_type == DMU_OST_ZFS) {
type = ZFS_TYPE_FILESYSTEM;
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type: 0x%d"), drrb->drr_type);
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive,
stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops,
&oxprops, &wkeydata, &wkeylen, errbuf)) != 0)
goto out;
/*
* When sending with properties (zfs send -p), the encryption property
* is not included because it is a SETONCE property and therefore
* treated as read only. However, we are always able to determine its
* value because raw sends will include it in the DRR_BEGIN payload
* and non-raw sends with properties are not allowed for encrypted
* datasets. Therefore, if this is a non-raw properties stream, we can
* infer that the value should be ZIO_CRYPT_OFF and manually add that
* to the received properties.
*/
if (stream_wantsnewfs && !raw && rcvprops != NULL &&
!nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) {
if (oxprops == NULL)
oxprops = fnvlist_alloc();
fnvlist_add_uint64(oxprops,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), ZIO_CRYPT_OFF);
}
if (flags->dryrun) {
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
/*
* We have read the DRR_BEGIN record, but we have
* not yet read the payload. For non-dryrun receives
* this will be done by the kernel, so we must
* emulate that here, before attempting to read
* more records.
*/
err = recv_read(hdl, infd, buf, drr->drr_payloadlen,
flags->byteswap, NULL);
free(buf);
if (err != 0)
goto out;
err = recv_skip(hdl, infd, flags->byteswap);
goto out;
}
if (flags->heal) {
err = ioctl_err = lzc_receive_with_heal(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force,
flags->heal, flags->resumable, raw, infd, drr_noswap, -1,
&read_bytes, &errflags, NULL, &prop_errors);
} else {
err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force,
flags->resumable, raw, infd, drr_noswap, -1, &read_bytes,
&errflags, NULL, &prop_errors);
}
ioctl_errno = ioctl_err;
prop_errflags = errflags;
if (err == 0) {
nvpair_t *prop_err = NULL;
while ((prop_err = nvlist_next_nvpair(prop_errors,
prop_err)) != NULL) {
char tbuf[1024];
zfs_prop_t prop;
int intval;
prop = zfs_name_to_prop(nvpair_name(prop_err));
(void) nvpair_value_int32(prop_err, &intval);
if (strcmp(nvpair_name(prop_err),
ZPROP_N_MORE_ERRORS) == 0) {
trunc_prop_errs(intval);
break;
} else if (snapname == NULL || finalsnap == NULL ||
strcmp(finalsnap, snapname) == 0 ||
strcmp(nvpair_name(prop_err),
zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) {
/*
 * Skip errors from special-case properties such as
 * "refquota" when they occur on intermediate snapshots
 * leading up to the final one; that is what the checks
 * above are for.
 *
 * See zfs_ioctl.c's extract_delay_props() for
 * a list of props which can fail on
 * intermediate snapshots, but shouldn't
 * affect the overall receive.
 */
(void) snprintf(tbuf, sizeof (tbuf),
dgettext(TEXT_DOMAIN,
"cannot receive %s property on %s"),
nvpair_name(prop_err), name);
zfs_setprop_error(hdl, prop, intval, tbuf);
}
}
}
if (err == 0 && snapprops_nvlist) {
zfs_cmd_t zc = {"\0"};
(void) strcpy(zc.zc_name, destsnap);
zc.zc_cookie = B_TRUE; /* received */
zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist);
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
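/*
 * Re-create any holds that accompanied the snapshot in the stream on
 * the newly received snapshot.
 */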
if (err == 0 && snapholds_nvlist) {
nvpair_t *pair;
nvlist_t *holds, *errors = NULL;
int cleanup_fd = -1;
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
fnvlist_add_string(holds, destsnap, nvpair_name(pair));
}
(void) lzc_hold(holds, cleanup_fd, &errors);
fnvlist_free(snapholds_nvlist);
fnvlist_free(holds);
}
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
/*
* It may be that this snapshot already exists,
* in which case we want to consume & ignore it
* rather than failing.
*/
avl_tree_t *local_avl;
nvlist_t *local_nv, *fs;
cp = strchr(destsnap, '@');
/*
* XXX Do this faster by just iterating over snaps in
* this fs. Also if zc_value does not exist, we will
* get a strange "does not exist" error message.
*/
*cp = '\0';
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE,
B_TRUE, &local_nv, &local_avl) == 0) {
*cp = '@';
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
if (fs != NULL) {
if (flags->verbose) {
(void) printf("snap %s already exists; "
"ignoring\n", destsnap);
}
err = ioctl_err = recv_skip(hdl, infd,
flags->byteswap);
}
}
*cp = '@';
}
if (ioctl_err != 0) {
switch (ioctl_errno) {
case ENODEV:
cp = strchr(destsnap, '@');
*cp = '\0';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"most recent snapshot of %s does not\n"
"match incremental source"), destsnap);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
*cp = '@';
break;
case ETXTBSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s has been modified\n"
"since most recent snapshot"), name);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
break;
case EACCES:
if (flags->heal) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"key must be loaded to do a non-raw "
"corrective recv on an encrypted "
"dataset."));
} else if (raw && stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create encryption key"));
} else if (raw && !stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key does not match "
"existing key"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"inherited key must be loaded"));
}
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
break;
case EEXIST:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination already exists"));
(void) zfs_error_fmt(hdl, EZFS_EXISTS,
dgettext(TEXT_DOMAIN, "cannot restore to %s"),
destsnap);
*cp = '@';
break;
case EINVAL:
if (flags->resumable) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"kernel modules must be upgraded to "
"receive this stream."));
} else if (embedded && !raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incompatible embedded data stream "
"feature with encrypted receive."));
}
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ECKSUM:
case ZFS_ERR_STREAM_TRUNCATED:
if (flags->heal)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"corrective receive was not able to "
"reconstruct the data needed for "
"healing."));
else
recv_ecksum_set_aux(hdl, destsnap,
flags->resumable, ioctl_err == ECKSUM);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental send stream requires -L "
"(--large-block), to match previous receive."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ENOTSUP:
if (flags->heal)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream is not compatible with the "
"data in the pool."));
else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to receive this "
"stream."));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EDQUOT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s space quota exceeded."), name);
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid missing. See errata %u at "
"https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-ER."),
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid mismatch. See the 'zfs receive' "
"man page section\n discussing the limitations "
"of raw encrypted send streams."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_SPILL_BLOCK_FLAG_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Spill block flag missing for raw send.\n"
"The zfs software on the sending system must "
"be updated."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EBUSY:
if (hastoken) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s contains "
"partially-complete state from "
"\"zfs receive -s\"."), name);
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
}
zfs_fallthrough;
default:
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
}
}
/*
* Mount the target filesystem (if created). Also mount any
* children of the target filesystem if we did a replication
* receive (indicated by stream_avl being non-NULL).
*/
if (clp) {
if (!flags->nomount)
err |= changelist_postfix(clp);
changelist_free(clp);
}
if ((newfs || stream_avl) && type == ZFS_TYPE_FILESYSTEM && !redacted)
flags->domount = B_TRUE;
if (prop_errflags & ZPROP_ERR_NOCLEAR) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to clear unreceived properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (prop_errflags & ZPROP_ERR_NORESTORE) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to restore original properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (err || ioctl_err) {
err = -1;
goto out;
}
if (flags->verbose) {
char buf1[64];
char buf2[64];
uint64_t bytes = read_bytes;
struct timespec delta;
clock_gettime(CLOCK_MONOTONIC_RAW, &delta);
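/*
 * delta = now - begin_time; borrow from tv_sec when the nanosecond
 * field would underflow, and avoid a zero-length interval so the rate
 * calculation below does not divide by zero.
 */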
if (begin_time.tv_nsec > delta.tv_nsec) {
delta.tv_nsec =
1000000000 + delta.tv_nsec - begin_time.tv_nsec;
delta.tv_sec -= 1;
} else
delta.tv_nsec -= begin_time.tv_nsec;
delta.tv_sec -= begin_time.tv_sec;
if (delta.tv_sec == 0 && delta.tv_nsec == 0)
delta.tv_nsec = 1;
double delta_f = delta.tv_sec + (delta.tv_nsec / 1e9);
zfs_nicebytes(bytes, buf1, sizeof (buf1));
zfs_nicebytes(bytes / delta_f, buf2, sizeof (buf2));
(void) printf("received %s stream in %.2f seconds (%s/sec)\n",
buf1, delta_f, buf2);
}
err = 0;
out:
if (prop_errors != NULL)
fnvlist_free(prop_errors);
if (tmp_keylocation[0] != '\0') {
fnvlist_add_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation);
}
if (newprops)
fnvlist_free(rcvprops);
fnvlist_free(oxprops);
fnvlist_free(origprops);
return (err);
}
/*
* Check properties we were asked to override (both -o|-x)
*/
static boolean_t
zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props,
const char *errbuf)
{
nvpair_t *nvp = NULL;
zfs_prop_t prop;
const char *name;
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
name = nvpair_name(nvp);
prop = zfs_name_to_prop(name);
if (prop == ZPROP_USERPROP) {
if (!zfs_prop_user(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s: invalid property '%s'"), errbuf, name);
return (B_FALSE);
}
continue;
}
/*
* "origin" is readonly but is used to receive datasets as
* clones so we don't raise an error here
*/
if (prop == ZFS_PROP_ORIGIN)
continue;
/* encryption params have their own verification later */
if (prop == ZFS_PROP_ENCRYPTION ||
zfs_prop_encryption_key_param(prop))
continue;
/*
* cannot override readonly, set-once, or other properties that
* require special handling (e.g. version, volsize)
*/
if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION ||
prop == ZFS_PROP_VOLSIZE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s: invalid property '%s'"), errbuf, name);
return (B_FALSE);
}
}
return (B_TRUE);
}
static int
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
int err;
dmu_replay_record_t drr, drr_noswap;
struct drr_begin *drrb = &drr.drr_u.drr_begin;
char errbuf[ERRBUFLEN];
zio_cksum_t zcksum = { { 0 } };
uint64_t featureflags;
int hdrtype;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* check cmdline props, raise an error if they cannot be received */
if (!zfs_receive_checkprops(hdl, cmdprops, errbuf))
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
if (flags->isprefix &&
!zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
"(%s) does not exist"), tosnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
if (originsnap &&
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
"(%s) does not exist"), originsnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* read in the BEGIN record */
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
&zcksum)))
return (err);
if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
/* It's the double end record at the end of a package */
return (ENODATA);
}
/* the kernel needs the non-byteswapped begin record */
drr_noswap = drr;
flags->byteswap = B_FALSE;
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
/*
* We computed the checksum in the wrong byteorder in
* recv_read() above; do it again correctly.
*/
memset(&zcksum, 0, sizeof (zio_cksum_t));
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
flags->byteswap = B_TRUE;
drr.drr_type = BSWAP_32(drr.drr_type);
drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
drrb->drr_type = BSWAP_32(drrb->drr_type);
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
}
if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad magic number)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
if (!DMU_STREAM_SUPPORTED(featureflags) ||
(hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
/*
* Be explicit about this one: rather than being a new
* feature we can't know about, it's an old feature that
* we dropped.
*/
if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has deprecated feature: dedup, try "
"'zstream redup [send in a file] | zfs recv "
"[...]'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has unsupported feature, feature flags = "
"%llx (unknown flags = %llx)"),
(u_longlong_t)featureflags,
(u_longlong_t)((featureflags) &
~DMU_BACKUP_FEATURE_MASK));
}
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
/* Holds feature is set once in the compound stream header. */
if (featureflags & DMU_BACKUP_FEATURE_HOLDS)
flags->holds = B_TRUE;
if (strchr(drrb->drr_toname, '@') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad snapshot name)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN];
if (sendfs == NULL) {
/*
* We were not called from zfs_receive_package(). Get
* the fs specified by 'zfs send'.
*/
char *cp;
(void) strlcpy(nonpackage_sendfs,
drr.drr_u.drr_begin.drr_toname,
sizeof (nonpackage_sendfs));
if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
*cp = '\0';
sendfs = nonpackage_sendfs;
VERIFY(finalsnap == NULL);
}
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
finalsnap, cmdprops));
} else {
assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
&zcksum, top_zfs, cmdprops));
}
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
* Return 0 on total success, -2 if some things couldn't be
* destroyed/renamed/promoted, -1 if some things couldn't be received.
* (-1 overrides -2; if -1 is returned and the resumable flag was specified,
* the transfer can be resumed if the sending side supports it).
*/
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
char *top_zfs = NULL;
int err;
struct stat sb;
char *originsnap = NULL;
/*
* The only way fstat can fail is if we do not have a valid file
* descriptor.
*/
if (fstat(infd, &sb) == -1) {
perror("fstat");
return (-2);
}
if (props) {
err = nvlist_lookup_string(props, "origin", &originsnap);
if (err && err != ENOENT)
return (err);
}
err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
stream_avl, &top_zfs, NULL, props);
if (err == 0 && !flags->nomount && flags->domount && top_zfs) {
zfs_handle_t *zhp = NULL;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, top_zfs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
err = -1;
goto out;
} else {
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_MOUNT_ALWAYS,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL) {
err = -1;
goto out;
}
/* mount and share received datasets */
err = changelist_postfix(clp);
changelist_free(clp);
if (err != 0)
err = -1;
}
}
out:
if (top_zfs)
free(top_zfs);
return (err);
}
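/*
* Illustrative sketch (editor's addition, not part of the upstream change):
* a caller typically hands zfs_receive() the stream on a file descriptor and
* interprets the tri-state return value described above.  The dataset name,
* flag choices and infd are assumptions for the example only; hdl is a
* handle from libzfs_init().
*
*	recvflags_t rflags = { 0 };
*	rflags.resumable = B_TRUE;
*	int err = zfs_receive(hdl, "tank/backup", NULL, &rflags, infd, NULL);
*	if (err == -1)
*		fprintf(stderr, "receive failed (may be resumable)\n");
*	else if (err == -2)
*		fprintf(stderr, "received, but some cleanup steps failed\n");
*/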
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
index cca86d2d7828..bc00a8dffd81 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
@@ -1,2033 +1,2039 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2020 Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2020 The FreeBSD Foundation
*
* Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* Internal utility routines for the ZFS library.
*/
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <math.h>
#if LIBFETCH_DYNAMIC
#include <dlfcn.h>
#endif
#include <sys/stat.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include "libzfs_impl.h"
#include "zfs_prop.h"
#include "zfeature_common.h"
#include <zfs_fletcher.h>
#include <libzutil.h>
/*
* We only care about the scheme in order to match the scheme
* with the handler. Each handler should validate the full URI
* as necessary.
*/
#define URI_REGEX "^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
int
libzfs_errno(libzfs_handle_t *hdl)
{
return (hdl->libzfs_error);
}
const char *
libzfs_error_action(libzfs_handle_t *hdl)
{
return (hdl->libzfs_action);
}
const char *
libzfs_error_description(libzfs_handle_t *hdl)
{
if (hdl->libzfs_desc[0] != '\0')
return (hdl->libzfs_desc);
switch (hdl->libzfs_error) {
case EZFS_NOMEM:
return (dgettext(TEXT_DOMAIN, "out of memory"));
case EZFS_BADPROP:
return (dgettext(TEXT_DOMAIN, "invalid property value"));
case EZFS_PROPREADONLY:
return (dgettext(TEXT_DOMAIN, "read-only property"));
case EZFS_PROPTYPE:
return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
"datasets of this type"));
case EZFS_PROPNONINHERIT:
return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
case EZFS_PROPSPACE:
return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
case EZFS_BADTYPE:
return (dgettext(TEXT_DOMAIN, "operation not applicable to "
"datasets of this type"));
case EZFS_BUSY:
return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
case EZFS_EXISTS:
return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
case EZFS_NOENT:
return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
case EZFS_BADSTREAM:
return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
case EZFS_DSREADONLY:
return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
case EZFS_VOLTOOBIG:
return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
"this system"));
case EZFS_INVALIDNAME:
return (dgettext(TEXT_DOMAIN, "invalid name"));
case EZFS_BADRESTORE:
return (dgettext(TEXT_DOMAIN, "unable to restore to "
"destination"));
case EZFS_BADBACKUP:
return (dgettext(TEXT_DOMAIN, "backup failed"));
case EZFS_BADTARGET:
return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
case EZFS_NODEVICE:
return (dgettext(TEXT_DOMAIN, "no such device in pool"));
case EZFS_BADDEV:
return (dgettext(TEXT_DOMAIN, "invalid device"));
case EZFS_NOREPLICAS:
return (dgettext(TEXT_DOMAIN, "no valid replicas"));
case EZFS_RESILVERING:
return (dgettext(TEXT_DOMAIN, "currently resilvering"));
case EZFS_BADVERSION:
return (dgettext(TEXT_DOMAIN, "unsupported version or "
"feature"));
case EZFS_POOLUNAVAIL:
return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
case EZFS_DEVOVERFLOW:
return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
case EZFS_BADPATH:
return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
case EZFS_CROSSTARGET:
return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
"pools"));
case EZFS_ZONED:
return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
case EZFS_MOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "mount failed"));
case EZFS_UMOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "unmount failed"));
case EZFS_UNSHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share removal failed"));
case EZFS_SHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share creation failed"));
case EZFS_UNSHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share removal failed"));
case EZFS_SHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share creation failed"));
case EZFS_PERM:
return (dgettext(TEXT_DOMAIN, "permission denied"));
case EZFS_NOSPC:
return (dgettext(TEXT_DOMAIN, "out of space"));
case EZFS_FAULT:
return (dgettext(TEXT_DOMAIN, "bad address"));
case EZFS_IO:
return (dgettext(TEXT_DOMAIN, "I/O error"));
case EZFS_INTR:
return (dgettext(TEXT_DOMAIN, "signal received"));
+ case EZFS_CKSUM:
+ return (dgettext(TEXT_DOMAIN, "insufficient replicas"));
case EZFS_ISSPARE:
return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
"spare"));
case EZFS_INVALCONFIG:
return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
case EZFS_RECURSIVE:
return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
case EZFS_NOHISTORY:
return (dgettext(TEXT_DOMAIN, "no history available"));
case EZFS_POOLPROPS:
return (dgettext(TEXT_DOMAIN, "failed to retrieve "
"pool properties"));
case EZFS_POOL_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this type of pool"));
case EZFS_POOL_INVALARG:
return (dgettext(TEXT_DOMAIN, "invalid argument for "
"this pool operation"));
case EZFS_NAMETOOLONG:
return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
case EZFS_OPENFAILED:
return (dgettext(TEXT_DOMAIN, "open failed"));
case EZFS_NOCAP:
return (dgettext(TEXT_DOMAIN,
"disk capacity information could not be retrieved"));
case EZFS_LABELFAILED:
return (dgettext(TEXT_DOMAIN, "write of label failed"));
case EZFS_BADWHO:
return (dgettext(TEXT_DOMAIN, "invalid user/group"));
case EZFS_BADPERM:
return (dgettext(TEXT_DOMAIN, "invalid permission"));
case EZFS_BADPERMSET:
return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
case EZFS_NODELEGATION:
return (dgettext(TEXT_DOMAIN, "delegated administration is "
"disabled on pool"));
case EZFS_BADCACHE:
return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
case EZFS_ISL2CACHE:
return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
case EZFS_VDEVNOTSUP:
return (dgettext(TEXT_DOMAIN, "vdev specification is not "
"supported"));
case EZFS_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this dataset"));
case EZFS_IOC_NOTSUPPORTED:
return (dgettext(TEXT_DOMAIN, "operation not supported by "
"zfs kernel module"));
case EZFS_ACTIVE_SPARE:
return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
"device"));
case EZFS_UNPLAYED_LOGS:
return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
"logs"));
case EZFS_REFTAG_RELE:
return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
case EZFS_REFTAG_HOLD:
return (dgettext(TEXT_DOMAIN, "tag already exists on this "
"dataset"));
case EZFS_TAGTOOLONG:
return (dgettext(TEXT_DOMAIN, "tag too long"));
case EZFS_PIPEFAILED:
return (dgettext(TEXT_DOMAIN, "pipe create failed"));
case EZFS_THREADCREATEFAILED:
return (dgettext(TEXT_DOMAIN, "thread create failed"));
case EZFS_POSTSPLIT_ONLINE:
return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
"into a new one"));
case EZFS_SCRUB_PAUSED:
return (dgettext(TEXT_DOMAIN, "scrub is paused; "
"use 'zpool scrub' to resume"));
case EZFS_SCRUBBING:
return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
"use 'zpool scrub -s' to cancel current scrub"));
case EZFS_NO_SCRUB:
return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
case EZFS_DIFF:
return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
case EZFS_DIFFDATA:
return (dgettext(TEXT_DOMAIN, "invalid diff data"));
case EZFS_POOLREADONLY:
return (dgettext(TEXT_DOMAIN, "pool is read-only"));
case EZFS_NO_PENDING:
return (dgettext(TEXT_DOMAIN, "operation is not "
"in progress"));
case EZFS_CHECKPOINT_EXISTS:
return (dgettext(TEXT_DOMAIN, "checkpoint exists"));
case EZFS_DISCARDING_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "currently discarding "
"checkpoint"));
case EZFS_NO_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "checkpoint does not exist"));
case EZFS_DEVRM_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "device removal in progress"));
case EZFS_VDEV_TOO_BIG:
return (dgettext(TEXT_DOMAIN, "device exceeds supported size"));
case EZFS_ACTIVE_POOL:
return (dgettext(TEXT_DOMAIN, "pool is imported on a "
"different host"));
case EZFS_CRYPTOFAILED:
return (dgettext(TEXT_DOMAIN, "encryption failure"));
case EZFS_TOOMANY:
return (dgettext(TEXT_DOMAIN, "argument list too long"));
case EZFS_INITIALIZING:
return (dgettext(TEXT_DOMAIN, "currently initializing"));
case EZFS_NO_INITIALIZE:
return (dgettext(TEXT_DOMAIN, "there is no active "
"initialization"));
case EZFS_WRONG_PARENT:
return (dgettext(TEXT_DOMAIN, "invalid parent dataset"));
case EZFS_TRIMMING:
return (dgettext(TEXT_DOMAIN, "currently trimming"));
case EZFS_NO_TRIM:
return (dgettext(TEXT_DOMAIN, "there is no active trim"));
case EZFS_TRIM_NOTSUP:
return (dgettext(TEXT_DOMAIN, "trim operations are not "
"supported by this device"));
case EZFS_NO_RESILVER_DEFER:
return (dgettext(TEXT_DOMAIN, "this action requires the "
"resilver_defer feature"));
case EZFS_EXPORT_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "pool export in progress"));
case EZFS_REBUILDING:
return (dgettext(TEXT_DOMAIN, "currently sequentially "
"resilvering"));
case EZFS_VDEV_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this type of vdev"));
case EZFS_NOT_USER_NAMESPACE:
return (dgettext(TEXT_DOMAIN, "the provided file "
"was not a user namespace file"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
assert(hdl->libzfs_error == 0);
return (dgettext(TEXT_DOMAIN, "no error"));
}
}
void
zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
fmt, ap);
hdl->libzfs_desc_active = 1;
va_end(ap);
}
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
fmt, ap);
hdl->libzfs_error = error;
if (hdl->libzfs_desc_active)
hdl->libzfs_desc_active = 0;
else
hdl->libzfs_desc[0] = '\0';
if (hdl->libzfs_printerr) {
if (error == EZFS_UNKNOWN) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
"error: %s: %s\n"), hdl->libzfs_action,
libzfs_error_description(hdl));
abort();
}
(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
libzfs_error_description(hdl));
if (error == EZFS_NOMEM)
exit(1);
}
}
int
zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_error_fmt(hdl, error, "%s", msg));
}
int
zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zfs_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
va_list ap)
{
switch (error) {
case EPERM:
case EACCES:
zfs_verror(hdl, EZFS_PERM, fmt, ap);
return (-1);
case ECANCELED:
zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
return (-1);
case EIO:
zfs_verror(hdl, EZFS_IO, fmt, ap);
return (-1);
case EFAULT:
zfs_verror(hdl, EZFS_FAULT, fmt, ap);
return (-1);
case EINTR:
zfs_verror(hdl, EZFS_INTR, fmt, ap);
return (-1);
+
+ case ECKSUM:
+ zfs_verror(hdl, EZFS_CKSUM, fmt, ap);
+ return (-1);
}
return (0);
}
int
zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_standard_error_fmt(hdl, error, "%s", msg));
}
int
zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENXIO:
case ENODEV:
case EPIPE:
zfs_verror(hdl, EZFS_IO, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset does not exist"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
break;
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE:
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_WRONG_PARENT:
zfs_verror(hdl, EZFS_WRONG_PARENT, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case ZFS_ERR_NOT_USER_NAMESPACE:
zfs_verror(hdl, EZFS_NOT_USER_NAMESPACE, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
break;
}
va_end(ap);
return (-1);
}
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
char *errbuf)
{
switch (err) {
case ENOSPC:
/*
* For quotas and reservations, ENOSPC indicates
* something different; setting a quota or reservation
* doesn't use any disk space.
*/
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, err, errbuf);
break;
}
break;
case EBUSY:
(void) zfs_standard_error(hdl, EBUSY, errbuf);
break;
case EROFS:
(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value too long"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool and or dataset must be upgraded to set this "
"property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ERANGE:
if (prop == ZFS_PROP_COMPRESSION ||
prop == ZFS_PROP_DNODESIZE ||
prop == ZFS_PROP_RECORDSIZE) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"bootable datasets"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else if (prop == ZFS_PROP_CHECKSUM ||
prop == ZFS_PROP_DEDUP) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"root pools"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EINVAL:
if (prop == ZPROP_INVAL) {
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case ZFS_ERR_BADPROP:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case EACCES:
if (prop == ZFS_PROP_KEYLOCATION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation may only be set on encryption roots"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
#ifdef _ILP32
if (prop == ZFS_PROP_VOLSIZE) {
(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
break;
}
zfs_fallthrough;
#endif
default:
(void) zfs_standard_error(hdl, err, errbuf);
}
}
int
zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zpool_standard_error_fmt(hdl, error, "%s", msg));
}
int
zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENODEV:
zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "no such pool or dataset"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
/* There is no pending operation to cancel */
case ENOTACTIVE:
zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap);
break;
case ENXIO:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is currently unavailable"));
zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
break;
case EINVAL:
zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
- return (-1);
+ break;
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"block size out of range or does not match"));
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_CHECKPOINT_EXISTS:
zfs_verror(hdl, EZFS_CHECKPOINT_EXISTS, fmt, ap);
break;
case ZFS_ERR_DISCARDING_CHECKPOINT:
zfs_verror(hdl, EZFS_DISCARDING_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_NO_CHECKPOINT:
zfs_verror(hdl, EZFS_NO_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_DEVRM_IN_PROGRESS:
zfs_verror(hdl, EZFS_DEVRM_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_VDEV_TOO_BIG:
zfs_verror(hdl, EZFS_VDEV_TOO_BIG, fmt, ap);
break;
case ZFS_ERR_EXPORT_IN_PROGRESS:
zfs_verror(hdl, EZFS_EXPORT_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_RESILVER_IN_PROGRESS:
zfs_verror(hdl, EZFS_RESILVERING, fmt, ap);
break;
case ZFS_ERR_REBUILD_IN_PROGRESS:
zfs_verror(hdl, EZFS_REBUILDING, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case ZFS_ERR_VDEV_NOTSUP:
zfs_verror(hdl, EZFS_VDEV_NOTSUP, fmt, ap);
break;
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
}
va_end(ap);
return (-1);
}
/*
* Display an out of memory error message and abort the current program.
*/
int
no_memory(libzfs_handle_t *hdl)
{
return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
}
/*
* A safe form of malloc() which will die if the allocation fails.
*/
void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) no_memory(hdl);
return (data);
}
/*
* A safe form of asprintf() which will die if the allocation fails.
*/
char *
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
char *ret;
int err;
va_start(ap, fmt);
err = vasprintf(&ret, fmt, ap);
va_end(ap);
if (err < 0) {
(void) no_memory(hdl);
ret = NULL;
}
return (ret);
}
/*
* A safe form of realloc(), which also zeroes newly allocated space.
*/
void *
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
{
void *ret;
if ((ret = realloc(ptr, newsize)) == NULL) {
(void) no_memory(hdl);
return (NULL);
}
memset((char *)ret + oldsize, 0, newsize - oldsize);
return (ret);
}
/*
* A safe form of strdup() which will die if the allocation fails.
*/
char *
zfs_strdup(libzfs_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) no_memory(hdl);
return (ret);
}
void
libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
{
hdl->libzfs_printerr = printerr;
}
/*
* Read lines from an open file descriptor and store them in an array of
* strings until EOF. lines[] will be allocated and populated with all the
* lines read. All newlines are replaced with NULL terminators for
* convenience. lines[] must be freed after use with libzfs_free_str_array().
*
* Returns the number of lines read.
*/
static int
libzfs_read_stdout_from_fd(int fd, char **lines[])
{
FILE *fp;
int lines_cnt = 0;
size_t len = 0;
char *line = NULL;
char **tmp_lines = NULL, **tmp;
fp = fdopen(fd, "r");
if (fp == NULL) {
close(fd);
return (0);
}
while (getline(&line, &len, fp) != -1) {
tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1));
if (tmp == NULL) {
/* Return the lines we were able to process */
break;
}
tmp_lines = tmp;
/* Remove newline if not EOF */
if (line[strlen(line) - 1] == '\n')
line[strlen(line) - 1] = '\0';
tmp_lines[lines_cnt] = strdup(line);
if (tmp_lines[lines_cnt] == NULL)
break;
++lines_cnt;
}
free(line);
fclose(fp);
*lines = tmp_lines;
return (lines_cnt);
}
static int
libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags,
char **lines[], int *lines_cnt)
{
pid_t pid;
int error, devnull_fd;
int link[2];
/*
* Setup a pipe between our child and parent process if we're
* reading stdout.
*/
if (lines != NULL && pipe2(link, O_NONBLOCK | O_CLOEXEC) == -1)
return (-EPIPE);
pid = fork();
if (pid == 0) {
/* Child process */
devnull_fd = open("/dev/null", O_WRONLY | O_CLOEXEC);
if (devnull_fd < 0)
_exit(-1);
if (!(flags & STDOUT_VERBOSE) && (lines == NULL))
(void) dup2(devnull_fd, STDOUT_FILENO);
else if (lines != NULL) {
/* Save the output to lines[] */
dup2(link[1], STDOUT_FILENO);
}
if (!(flags & STDERR_VERBOSE))
(void) dup2(devnull_fd, STDERR_FILENO);
if (flags & NO_DEFAULT_PATH) {
if (env == NULL)
execv(path, argv);
else
execve(path, argv, env);
} else {
if (env == NULL)
execvp(path, argv);
else
execvpe(path, argv, env);
}
_exit(-1);
} else if (pid > 0) {
/* Parent process */
int status;
while ((error = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR)
;
if (error < 0 || !WIFEXITED(status))
return (-1);
if (lines != NULL) {
close(link[1]);
*lines_cnt = libzfs_read_stdout_from_fd(link[0], lines);
}
return (WEXITSTATUS(status));
}
return (-1);
}
int
libzfs_run_process(const char *path, char *argv[], int flags)
{
return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL));
}
/*
* Run a command and store its stdout lines in an array of strings (lines[]).
* lines[] is allocated and populated for you, and the number of lines is set in
* lines_cnt. lines[] must be freed after use with libzfs_free_str_array().
* All newlines (\n) in lines[] are replaced with NULL terminators for
* convenience.
*/
int
libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt));
}
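/*
* Illustrative sketch (editor's addition): typical use of the helper above
* together with libzfs_free_str_array().  The command and variable names are
* assumptions for the example only.
*
*	char *argv[] = { "uname", "-r", NULL };
*	char **lines = NULL;
*	int cnt = 0;
*	if (libzfs_run_process_get_stdout("uname", argv, NULL,
*	    &lines, &cnt) == 0) {
*		for (int i = 0; i < cnt; i++)
*			printf("%s\n", lines[i]);
*		libzfs_free_str_array(lines, cnt);
*	}
*/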
/*
* Same as libzfs_run_process_get_stdout(), but run without $PATH set. This
* means that *path needs to be the full path to the executable.
*/
int
libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
char *env[], char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH,
lines, lines_cnt));
}
/*
* Free an array of strings. Free both the strings contained in the array and
* the array itself.
*/
void
libzfs_free_str_array(char **strs, int count)
{
while (--count >= 0)
free(strs[count]);
free(strs);
}
/*
* Returns 1 if environment variable is set to "YES", "yes", "ON", "on", or
* a non-zero number.
*
* Returns 0 otherwise.
*/
boolean_t
libzfs_envvar_is_set(const char *envvar)
{
char *env = getenv(envvar);
return (env && (strtoul(env, NULL, 0) > 0 ||
(!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) ||
(!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2)));
}
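/*
* Illustrative note (editor's addition): the check above is value-based, so
* each of the following settings enables the behavior guarded by it,
*
*	setenv("ZFS_COLOR", "1", 1);
*	setenv("ZFS_COLOR", "yes", 1);
*	setenv("ZFS_COLOR", "ON", 1);
*
* while a value of "0" or "no", an empty value, or an unset variable leaves
* it disabled.
*/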
libzfs_handle_t *
libzfs_init(void)
{
libzfs_handle_t *hdl;
int error;
char *env;
if ((error = libzfs_load_module()) != 0) {
errno = error;
return (NULL);
}
if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
return (NULL);
}
if (regcomp(&hdl->libzfs_urire, URI_REGEX, 0) != 0) {
free(hdl);
return (NULL);
}
if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR|O_EXCL|O_CLOEXEC)) < 0) {
free(hdl);
return (NULL);
}
if (libzfs_core_init() != 0) {
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
vdev_prop_init();
libzfs_mnttab_init(hdl);
fletcher_4_init();
if (getenv("ZFS_PROP_DEBUG") != NULL) {
hdl->libzfs_prop_debug = B_TRUE;
}
if ((env = getenv("ZFS_SENDRECV_MAX_NVLIST")) != NULL) {
if ((error = zfs_nicestrtonum(hdl, env,
&hdl->libzfs_max_nvlist))) {
errno = error;
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
} else {
hdl->libzfs_max_nvlist = (SPA_MAXBLOCKSIZE * 4);
}
/*
* For testing, remove some settable properties and features
*/
if (libzfs_envvar_is_set("ZFS_SYSFS_PROP_SUPPORT_TEST")) {
zprop_desc_t *proptbl;
proptbl = zpool_prop_get_table();
proptbl[ZPOOL_PROP_COMMENT].pd_zfs_mod_supported = B_FALSE;
proptbl = zfs_prop_get_table();
proptbl[ZFS_PROP_DNODESIZE].pd_zfs_mod_supported = B_FALSE;
zfeature_info_t *ftbl = spa_feature_table;
ftbl[SPA_FEATURE_LARGE_BLOCKS].fi_zfs_mod_supported = B_FALSE;
}
return (hdl);
}
void
libzfs_fini(libzfs_handle_t *hdl)
{
(void) close(hdl->libzfs_fd);
zpool_free_handles(hdl);
namespace_clear(hdl);
libzfs_mnttab_fini(hdl);
libzfs_core_fini();
regfree(&hdl->libzfs_urire);
fletcher_4_fini();
#if LIBFETCH_DYNAMIC
if (hdl->libfetch != (void *)-1 && hdl->libfetch != NULL)
(void) dlclose(hdl->libfetch);
free(hdl->libfetch_load_error);
#endif
free(hdl);
}
libzfs_handle_t *
zpool_get_handle(zpool_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
libzfs_handle_t *
zfs_get_handle(zfs_handle_t *zhp)
{
return (zhp->zfs_hdl);
}
zpool_handle_t *
zfs_get_pool_handle(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
/*
* Given a name, determine whether or not it's a valid path
* (starts with '/' or "./"). If so, walk the mnttab trying
* to match the device number. If not, treat the path as an
* fs/vol/snap/bkmark name.
*/
zfs_handle_t *
zfs_path_to_zhandle(libzfs_handle_t *hdl, const char *path, zfs_type_t argtype)
{
struct stat64 statbuf;
struct extmnttab entry;
if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
/*
* It's not a valid path, assume it's a name of type 'argtype'.
*/
return (zfs_open(hdl, path, argtype));
}
if (getextmntent(path, &entry, &statbuf) != 0)
return (NULL);
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
path);
return (NULL);
}
return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
}
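/*
* Illustrative sketch (editor's addition): given the behavior described
* above, both calls below return a handle for the same dataset when /var/tmp
* is a mounted ZFS filesystem backed by "tank/var/tmp" (names are
* assumptions for the example only).
*
*	zfs_handle_t *zhp1 = zfs_path_to_zhandle(hdl, "/var/tmp",
*	    ZFS_TYPE_FILESYSTEM);
*	zfs_handle_t *zhp2 = zfs_path_to_zhandle(hdl, "tank/var/tmp",
*	    ZFS_TYPE_FILESYSTEM);
*/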
/*
* Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
* an ioctl().
*/
void
zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
{
if (len == 0)
len = 256 * 1024;
zc->zc_nvlist_dst_size = len;
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
}
/*
* Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
* expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
* filled in by the kernel to indicate the actual required size.
*/
void
zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
}
/*
* Called to free the src and dst nvlists stored in the command structure.
*/
void
zcmd_free_nvlists(zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_conf);
free((void *)(uintptr_t)zc->zc_nvlist_src);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_conf = 0;
zc->zc_nvlist_src = 0;
zc->zc_nvlist_dst = 0;
}
static void
zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
nvlist_t *nvl)
{
char *packed;
size_t len = fnvlist_size(nvl);
packed = zfs_alloc(hdl, len);
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
*outnv = (uint64_t)(uintptr_t)packed;
*outlen = len;
}
void
zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
&zc->zc_nvlist_conf_size, nvl);
}
void
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
&zc->zc_nvlist_src_size, nvl);
}
/*
* Unpacks an nvlist from the ZFS ioctl command structure.
*/
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size, nvlp, 0) != 0)
return (no_memory(hdl));
return (0);
}
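/*
* Illustrative sketch (editor's addition): the usual pattern for ioctls that
* return an nvlist is to allocate a destination buffer, retry on ENOMEM with
* zcmd_expand_dst_nvlist(), and unpack the result.  The ioctl number, the
* zfs_ioctl() wrapper and the dataset name are assumptions borrowed from
* other libzfs call sites, not part of this change.
*
*	zfs_cmd_t zc = {"\0"};
*	nvlist_t *nvl = NULL;
*	(void) strlcpy(zc.zc_name, "tank/fs", sizeof (zc.zc_name));
*	zcmd_alloc_dst_nvlist(hdl, &zc, 0);
*	while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
*		if (errno == ENOMEM)
*			zcmd_expand_dst_nvlist(hdl, &zc);
*		else
*			break;
*	}
*	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) == 0) {
*		nvlist_print(stdout, nvl);
*		nvlist_free(nvl);
*	}
*	zcmd_free_nvlists(&zc);
*/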
/*
* ================================================================
* API shared by zfs and zpool property management
* ================================================================
*/
static void
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
zprop_list_t *pl = cbp->cb_proplist;
int i;
char *title;
size_t len;
cbp->cb_first = B_FALSE;
if (cbp->cb_scripted)
return;
/*
* Start with the length of the column headers.
*/
cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
"PROPERTY"));
cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
"VALUE"));
cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
"RECEIVED"));
cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
"SOURCE"));
/* first property is always NAME */
assert(cbp->cb_proplist->pl_prop ==
((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
((type == ZFS_TYPE_VDEV) ? VDEV_PROP_NAME : ZFS_PROP_NAME)));
/*
* Go through and calculate the widths for each column. For the
* 'source' column, we kludge it up by taking the worst-case scenario of
* inheriting from the longest name. This is acceptable because in the
* majority of cases 'SOURCE' is the last column displayed, and we don't
* use the width anyway. Note that the 'VALUE' column can be oversized,
* if the name of the property is much longer than any values we find.
*/
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* 'PROPERTY' column
*/
if (pl->pl_prop != ZPROP_USERPROP) {
const char *propname = (type == ZFS_TYPE_POOL) ?
zpool_prop_to_name(pl->pl_prop) :
((type == ZFS_TYPE_VDEV) ?
vdev_prop_to_name(pl->pl_prop) :
zfs_prop_to_name(pl->pl_prop));
assert(propname != NULL);
len = strlen(propname);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
} else {
assert(pl->pl_user_prop != NULL);
len = strlen(pl->pl_user_prop);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
}
/*
* 'VALUE' column. The first property is always the 'name'
* property that was tacked on either by /sbin/zfs's
* zfs_do_get() or when calling zprop_expand_list(), so we
* ignore its width. If the user specified the name property
* to display, then it will be later in the list in any case.
*/
if (pl != cbp->cb_proplist &&
pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
/* 'RECEIVED' column. */
if (pl != cbp->cb_proplist &&
pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
/*
* 'NAME' and 'SOURCE' columns
*/
if (pl->pl_prop == ((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
((type == ZFS_TYPE_VDEV) ? VDEV_PROP_NAME :
ZFS_PROP_NAME)) && pl->pl_width >
cbp->cb_colwidths[GET_COL_NAME]) {
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
strlen(dgettext(TEXT_DOMAIN, "inherited from"));
}
}
/*
* Now go through and print the headers.
*/
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
title = dgettext(TEXT_DOMAIN, "NAME");
break;
case GET_COL_PROPERTY:
title = dgettext(TEXT_DOMAIN, "PROPERTY");
break;
case GET_COL_VALUE:
title = dgettext(TEXT_DOMAIN, "VALUE");
break;
case GET_COL_RECVD:
title = dgettext(TEXT_DOMAIN, "RECEIVED");
break;
case GET_COL_SOURCE:
title = dgettext(TEXT_DOMAIN, "SOURCE");
break;
default:
title = NULL;
}
if (title != NULL) {
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", title);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
title);
}
}
(void) printf("\n");
}
/*
* Display a single line of output, according to the settings in the callback
* structure.
*/
void
zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
const char *propname, const char *value, zprop_source_t sourcetype,
const char *source, const char *recvd_value)
{
int i;
const char *str = NULL;
char buf[128];
/*
* Ignore those source types that the user has chosen to ignore.
*/
if ((sourcetype & cbp->cb_sources) == 0)
return;
if (cbp->cb_first)
zprop_print_headers(cbp, cbp->cb_type);
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
str = name;
break;
case GET_COL_PROPERTY:
str = propname;
break;
case GET_COL_VALUE:
str = value;
break;
case GET_COL_SOURCE:
switch (sourcetype) {
case ZPROP_SRC_NONE:
str = "-";
break;
case ZPROP_SRC_DEFAULT:
str = "default";
break;
case ZPROP_SRC_LOCAL:
str = "local";
break;
case ZPROP_SRC_TEMPORARY:
str = "temporary";
break;
case ZPROP_SRC_INHERITED:
(void) snprintf(buf, sizeof (buf),
"inherited from %s", source);
str = buf;
break;
case ZPROP_SRC_RECEIVED:
str = "received";
break;
default:
str = NULL;
assert(!"unhandled zprop_source_t");
}
break;
case GET_COL_RECVD:
str = (recvd_value == NULL ? "-" : recvd_value);
break;
default:
continue;
}
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", str);
else if (cbp->cb_scripted)
(void) printf("%s\t", str);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
str);
}
(void) printf("\n");
}
/*
* Given a numeric suffix, convert the value into a number of bits that the
* resulting value must be shifted.
*/
static int
str2shift(libzfs_handle_t *hdl, const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Allow 'G' = 'GB' = 'GiB', case-insensitively.
* However, 'BB' and 'BiB' are disallowed.
*/
if (buf[1] == '\0' ||
(toupper(buf[0]) != 'B' &&
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
buf[3] == '\0'))))
return (10 * i);
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Convert a string of the form '100G' into a real number. Used when setting
* properties or creating a volume. 'buf' is used to place an extended error
* message for the caller to use.
*/
int
zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
{
char *end;
int shift;
*num = 0;
/* Check to see if this looks like a number. */
if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad numeric value '%s'"), value);
return (-1);
}
/* Rely on strtoull() to process the numeric portion. */
errno = 0;
*num = strtoull(value, &end, 10);
/*
* Check for ERANGE, which indicates that the value is too large to fit
* in a 64-bit value.
*/
if (errno == ERANGE) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
/*
* If we have a decimal value, then do the computation with floating
* point arithmetic. Otherwise, use standard arithmetic.
*/
if (*end == '.') {
double fval = strtod(value, &end);
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
fval *= pow(2, shift);
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num = (uint64_t)fval;
} else {
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
/* Check for overflow */
if (shift >= 64 || (*num << shift) >> shift != *num) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num <<= shift;
}
return (0);
}
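/*
* Worked examples (editor's addition): the suffix is converted by
* str2shift() into a power-of-two shift, so
*
*	"64K"   ->  64 << 10  ==  65536
*	"100G"  -> 100 << 30  ==  107374182400
*	"1.5G"  -> 1.5 * 2^30 ==  1610612736
*
* and a bare decimal such as "1.5" gets shift 0 and is truncated to 1 by the
* cast to uint64_t.
*/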
/*
* Given a propname=value nvpair to set, parse any numeric properties
* (index, boolean, etc) if they are specified as strings and add the
* resulting nvpair to the returned nvlist.
*
* At the DSL layer, all properties are either 64-bit numbers or strings.
* We want the user to be able to ignore this fact and specify properties
* as native values (numbers, for example) or as strings (to simplify
* command line utilities). This also handles converting index types
* (compression, checksum, etc) from strings to their on-disk index.
*/
int
zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
const char *errbuf)
{
data_type_t datatype = nvpair_type(elem);
zprop_type_t proptype;
const char *propname;
char *value;
boolean_t isnone = B_FALSE;
boolean_t isauto = B_FALSE;
int err = 0;
if (type == ZFS_TYPE_POOL) {
proptype = zpool_prop_get_type(prop);
propname = zpool_prop_to_name(prop);
} else if (type == ZFS_TYPE_VDEV) {
proptype = vdev_prop_get_type(prop);
propname = vdev_prop_to_name(prop);
} else {
proptype = zfs_prop_get_type(prop);
propname = zfs_prop_to_name(prop);
}
/*
* Convert any properties to the internal DSL value types.
*/
*svalp = NULL;
*ivalp = 0;
switch (proptype) {
case PROP_TYPE_STRING:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
err = nvpair_value_string(elem, svalp);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is invalid"), nvpair_name(elem));
goto error;
}
if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is too long"), nvpair_name(elem));
goto error;
}
break;
case PROP_TYPE_NUMBER:
if (datatype == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &value);
if (strcmp(value, "none") == 0) {
isnone = B_TRUE;
} else if (strcmp(value, "auto") == 0) {
isauto = B_TRUE;
} else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) {
goto error;
}
} else if (datatype == DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, ivalp);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), nvpair_name(elem));
goto error;
}
/*
* Quota special: force 'none' and don't allow 0.
*/
if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
(prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable quota/refquota"));
goto error;
}
/*
* Special handling for "*_limit=none". In this case it's not
* 0 but UINT64_MAX.
*/
if ((type & ZFS_TYPE_DATASET) && isnone &&
(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT)) {
*ivalp = UINT64_MAX;
}
/*
* Special handling for setting 'refreservation' to 'auto'. Use
* UINT64_MAX to tell the caller to use zfs_fix_auto_resv().
* 'auto' is only allowed on volumes.
*/
if (isauto) {
switch (prop) {
case ZFS_PROP_REFRESERVATION:
if ((type & ZFS_TYPE_VOLUME) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s=auto' only allowed on "
"volumes"), nvpair_name(elem));
goto error;
}
*ivalp = UINT64_MAX;
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'auto' is invalid value for '%s'"),
nvpair_name(elem));
goto error;
}
}
break;
case PROP_TYPE_INDEX:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
(void) nvpair_value_string(elem, &value);
if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be one of '%s'"), propname,
zprop_values(prop, type));
goto error;
}
break;
default:
abort();
}
/*
* Add the result to our return set of properties.
*/
if (*svalp != NULL) {
if (nvlist_add_string(ret, propname, *svalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
} else {
if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
}
return (0);
error:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
return (-1);
}
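/*
* Illustrative sketch (editor's addition): feeding a string-valued index
* property through zprop_parse_value() yields its numeric on-disk index in
* the output nvlist.  Variable names are assumptions for the example only;
* hdl is a handle from libzfs_init().
*
*	nvlist_t *in = fnvlist_alloc();
*	nvlist_t *out = fnvlist_alloc();
*	char *sval = NULL;
*	uint64_t ival = 0;
*	fnvlist_add_string(in, "compression", "lz4");
*	nvpair_t *pair = nvlist_next_nvpair(in, NULL);
*	if (zprop_parse_value(hdl, pair, ZFS_PROP_COMPRESSION,
*	    ZFS_TYPE_FILESYSTEM, out, &sval, &ival, "cannot set") == 0)
*		(void) printf("index value: %llu\n", (u_longlong_t)ival);
*	fnvlist_free(in);
*	fnvlist_free(out);
*/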
static int
addlist(libzfs_handle_t *hdl, const char *propname, zprop_list_t **listp,
zfs_type_t type)
{
int prop = zprop_name_to_prop(propname, type);
if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE))
prop = ZPROP_INVAL;
/*
* Return failure if no property table entry was found and this isn't
* a user-defined property.
*/
if (prop == ZPROP_USERPROP && ((type == ZFS_TYPE_POOL &&
!zpool_prop_feature(propname) &&
!zpool_prop_unsupported(propname)) ||
((type == ZFS_TYPE_DATASET) && !zfs_prop_user(propname) &&
!zfs_prop_userquota(propname) && !zfs_prop_written(propname)) ||
((type == ZFS_TYPE_VDEV) && !vdev_prop_user(propname)))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
zprop_list_t *entry = zfs_alloc(hdl, sizeof (*entry));
entry->pl_prop = prop;
if (prop == ZPROP_USERPROP) {
entry->pl_user_prop = zfs_strdup(hdl, propname);
entry->pl_width = strlen(propname);
} else {
entry->pl_width = zprop_width(prop, &entry->pl_fixed,
type);
}
*listp = entry;
return (0);
}
/*
* Given a comma-separated list of properties, construct a property list
* containing both user-defined and native properties. This function will
* return a NULL list if 'all' is specified, which can later be expanded
* by zprop_expand_list().
*/
int
zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
zfs_type_t type)
{
*listp = NULL;
/*
* If 'all' is specified, return a NULL list.
*/
if (strcmp(props, "all") == 0)
return (0);
/*
* If no props were specified, return an error.
*/
if (props[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no properties specified"));
return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
"bad property list")));
}
for (char *p; (p = strsep(&props, ",")); )
if (strcmp(p, "space") == 0) {
static const char *const spaceprops[] = {
"name", "avail", "used", "usedbysnapshots",
"usedbydataset", "usedbyrefreservation",
"usedbychildren"
};
for (int i = 0; i < ARRAY_SIZE(spaceprops); i++) {
if (addlist(hdl, spaceprops[i], listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
} else {
if (addlist(hdl, p, listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
return (0);
}
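/*
* Illustrative sketch (editor's addition): building and releasing a property
* list for "zfs get"-style output.  Passing "all" leaves *listp NULL so that
* zprop_expand_list() can later fill in every native property.  Variable
* names are assumptions for the example only.
*
*	zprop_list_t *pl = NULL;
*	char props[] = "name,used,avail,compression";
*	if (zprop_get_list(hdl, props, &pl, ZFS_TYPE_DATASET) == 0) {
*		for (zprop_list_t *p = pl; p != NULL; p = p->pl_next)
*			(void) printf("prop %d\n", p->pl_prop);
*		zprop_free_list(pl);
*	}
*/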
void
zprop_free_list(zprop_list_t *pl)
{
zprop_list_t *next;
while (pl != NULL) {
next = pl->pl_next;
free(pl->pl_user_prop);
free(pl);
pl = next;
}
}
typedef struct expand_data {
zprop_list_t **last;
libzfs_handle_t *hdl;
zfs_type_t type;
} expand_data_t;
static int
zprop_expand_list_cb(int prop, void *cb)
{
zprop_list_t *entry;
expand_data_t *edp = cb;
entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t));
entry->pl_prop = prop;
entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
entry->pl_all = B_TRUE;
*(edp->last) = entry;
edp->last = &entry->pl_next;
return (ZPROP_CONT);
}
int
zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
{
zprop_list_t *entry;
zprop_list_t **last;
expand_data_t exp;
if (*plp == NULL) {
/*
* If this is the very first time we've been called for an 'all'
* specification, expand the list to include all native
* properties.
*/
last = plp;
exp.last = last;
exp.hdl = hdl;
exp.type = type;
if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
B_FALSE, type) == ZPROP_INVAL)
return (-1);
/*
* Add 'name' to the beginning of the list, which is handled
* specially.
*/
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
((type == ZFS_TYPE_VDEV) ? VDEV_PROP_NAME : ZFS_PROP_NAME));
entry->pl_width = zprop_width(entry->pl_prop,
&entry->pl_fixed, type);
entry->pl_all = B_TRUE;
entry->pl_next = *plp;
*plp = entry;
}
return (0);
}
int
zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
zfs_type_t type)
{
return (zprop_iter_common(func, cb, show_all, ordered, type));
}
const char *
zfs_version_userland(void)
{
return (ZFS_META_ALIAS);
}
/*
* Prints both zfs userland and kernel versions
* Returns 0 on success, and -1 on error
*/
int
zfs_version_print(void)
{
(void) puts(ZFS_META_ALIAS);
char *kver = zfs_version_kernel();
if (kver == NULL) {
fprintf(stderr, "zfs_version_kernel() failed: %s\n",
strerror(errno));
return (-1);
}
(void) printf("zfs-kmod-%s\n", kver);
free(kver);
return (0);
}
/*
* Return 1 if the user requested ANSI color output, and our terminal supports
* it. Return 0 for no color.
*/
static int
use_color(void)
{
static int use_color = -1;
char *term;
/*
* Optimization:
*
* For each zpool invocation, we do a single check to see if we should
* be using color or not, and cache that value for the lifetime of the
* zpool command. That makes it cheap to call use_color() when
* we're printing with color. We assume that the settings are not going
* to change during the invocation of a zpool command (the user isn't
* going to change the ZFS_COLOR value while zpool is running, for
* example).
*/
if (use_color != -1) {
/*
* We've already figured out if we should be using color or
* not. Return the cached value.
*/
return (use_color);
}
term = getenv("TERM");
/*
* The user sets the ZFS_COLOR env var to enable zpool ANSI color
* output. However, if NO_COLOR is set (https://no-color.org/) then
* don't use it. Also, don't use color if the terminal doesn't
* support it.
*/
if (libzfs_envvar_is_set("ZFS_COLOR") &&
!libzfs_envvar_is_set("NO_COLOR") &&
isatty(STDOUT_FILENO) && term && strcmp("dumb", term) != 0 &&
strcmp("unknown", term) != 0) {
/* Color supported */
use_color = 1;
} else {
use_color = 0;
}
return (use_color);
}
/*
* color_start() and color_end() are used for when you want to colorize a block
* of text. For example:
*
* color_start(ANSI_RED_FG);
* printf("hello");
* printf("world");
* color_end();
*/
void
color_start(const char *color)
{
if (use_color())
fputs(color, stdout);
}
void
color_end(void)
{
if (use_color())
fputs(ANSI_RESET, stdout);
}
/* printf() with a color. If color is NULL, then do a normal printf. */
int
printf_color(const char *color, const char *format, ...)
{
va_list aptr;
int rc;
if (color)
color_start(color);
va_start(aptr, format);
rc = vprintf(format, aptr);
va_end(aptr);
if (color)
color_end();
return (rc);
}
diff --git a/sys/contrib/openzfs/lib/libzpool/Makefile.am b/sys/contrib/openzfs/lib/libzpool/Makefile.am
index eaa920e56106..0cc1997f7a99 100644
--- a/sys/contrib/openzfs/lib/libzpool/Makefile.am
+++ b/sys/contrib/openzfs/lib/libzpool/Makefile.am
@@ -1,214 +1,213 @@
libzpool_la_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS) $(LIBRARY_CFLAGS)
libzpool_la_CFLAGS += $(ZLIB_CFLAGS)
libzpool_la_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
libzpool_la_CPPFLAGS += -I$(srcdir)/include/os/@ac_system_l@/zfs
libzpool_la_CPPFLAGS += -DLIB_ZPOOL_BUILD
lib_LTLIBRARIES += libzpool.la
CPPCHECKTARGETS += libzpool.la
dist_libzpool_la_SOURCES = \
%D%/kernel.c \
%D%/taskq.c \
%D%/util.c
nodist_libzpool_la_SOURCES = \
module/lua/lapi.c \
module/lua/lauxlib.c \
module/lua/lbaselib.c \
module/lua/lcode.c \
module/lua/lcompat.c \
module/lua/lcorolib.c \
module/lua/lctype.c \
module/lua/ldebug.c \
module/lua/ldo.c \
module/lua/lfunc.c \
module/lua/lgc.c \
module/lua/llex.c \
module/lua/lmem.c \
module/lua/lobject.c \
module/lua/lopcodes.c \
module/lua/lparser.c \
module/lua/lstate.c \
module/lua/lstring.c \
module/lua/lstrlib.c \
module/lua/ltable.c \
module/lua/ltablib.c \
module/lua/ltm.c \
module/lua/lvm.c \
module/lua/lzio.c \
\
module/os/linux/zfs/abd_os.c \
module/os/linux/zfs/arc_os.c \
module/os/linux/zfs/trace.c \
module/os/linux/zfs/vdev_file.c \
module/os/linux/zfs/zfs_debug.c \
module/os/linux/zfs/zfs_racct.c \
module/os/linux/zfs/zfs_znode.c \
module/os/linux/zfs/zio_crypt.c \
\
module/zcommon/cityhash.c \
module/zcommon/zfeature_common.c \
module/zcommon/zfs_comutil.c \
module/zcommon/zfs_deleg.c \
module/zcommon/zfs_fletcher.c \
module/zcommon/zfs_fletcher_aarch64_neon.c \
module/zcommon/zfs_fletcher_avx512.c \
module/zcommon/zfs_fletcher_intel.c \
module/zcommon/zfs_fletcher_sse.c \
module/zcommon/zfs_fletcher_superscalar.c \
module/zcommon/zfs_fletcher_superscalar4.c \
module/zcommon/zfs_namecheck.c \
module/zcommon/zfs_prop.c \
module/zcommon/zpool_prop.c \
module/zcommon/zprop_common.c \
\
module/zfs/abd.c \
module/zfs/aggsum.c \
module/zfs/arc.c \
module/zfs/blake3_zfs.c \
module/zfs/blkptr.c \
module/zfs/bplist.c \
module/zfs/bpobj.c \
module/zfs/bptree.c \
module/zfs/bqueue.c \
module/zfs/btree.c \
module/zfs/dbuf.c \
module/zfs/dbuf_stats.c \
module/zfs/ddt.c \
module/zfs/ddt_zap.c \
module/zfs/dmu.c \
module/zfs/dmu_diff.c \
module/zfs/dmu_object.c \
module/zfs/dmu_objset.c \
module/zfs/dmu_recv.c \
module/zfs/dmu_redact.c \
module/zfs/dmu_send.c \
module/zfs/dmu_traverse.c \
module/zfs/dmu_tx.c \
module/zfs/dmu_zfetch.c \
module/zfs/dnode.c \
module/zfs/dnode_sync.c \
module/zfs/dsl_bookmark.c \
module/zfs/dsl_crypt.c \
module/zfs/dsl_dataset.c \
module/zfs/dsl_deadlist.c \
module/zfs/dsl_deleg.c \
module/zfs/dsl_destroy.c \
module/zfs/dsl_dir.c \
module/zfs/dsl_pool.c \
module/zfs/dsl_prop.c \
module/zfs/dsl_scan.c \
module/zfs/dsl_synctask.c \
module/zfs/dsl_userhold.c \
module/zfs/edonr_zfs.c \
module/zfs/fm.c \
module/zfs/gzip.c \
module/zfs/hkdf.c \
module/zfs/lz4.c \
module/zfs/lz4_zfs.c \
module/zfs/lzjb.c \
module/zfs/metaslab.c \
module/zfs/mmp.c \
module/zfs/multilist.c \
module/zfs/objlist.c \
module/zfs/pathname.c \
module/zfs/range_tree.c \
module/zfs/refcount.c \
module/zfs/rrwlock.c \
module/zfs/sa.c \
module/zfs/sha256.c \
module/zfs/skein_zfs.c \
module/zfs/spa.c \
- module/zfs/spa_boot.c \
module/zfs/spa_checkpoint.c \
module/zfs/spa_config.c \
module/zfs/spa_errlog.c \
module/zfs/spa_history.c \
module/zfs/spa_log_spacemap.c \
module/zfs/spa_misc.c \
module/zfs/spa_stats.c \
module/zfs/space_map.c \
module/zfs/space_reftree.c \
module/zfs/txg.c \
module/zfs/uberblock.c \
module/zfs/unique.c \
module/zfs/vdev.c \
module/zfs/vdev_cache.c \
module/zfs/vdev_draid.c \
module/zfs/vdev_draid_rand.c \
module/zfs/vdev_indirect.c \
module/zfs/vdev_indirect_births.c \
module/zfs/vdev_indirect_mapping.c \
module/zfs/vdev_initialize.c \
module/zfs/vdev_label.c \
module/zfs/vdev_mirror.c \
module/zfs/vdev_missing.c \
module/zfs/vdev_queue.c \
module/zfs/vdev_raidz.c \
module/zfs/vdev_raidz_math.c \
module/zfs/vdev_raidz_math_aarch64_neon.c \
module/zfs/vdev_raidz_math_aarch64_neonx2.c \
module/zfs/vdev_raidz_math_avx2.c \
module/zfs/vdev_raidz_math_avx512bw.c \
module/zfs/vdev_raidz_math_avx512f.c \
module/zfs/vdev_raidz_math_powerpc_altivec.c \
module/zfs/vdev_raidz_math_scalar.c \
module/zfs/vdev_raidz_math_sse2.c \
module/zfs/vdev_raidz_math_ssse3.c \
module/zfs/vdev_rebuild.c \
module/zfs/vdev_removal.c \
module/zfs/vdev_root.c \
module/zfs/vdev_trim.c \
module/zfs/zap.c \
module/zfs/zap_leaf.c \
module/zfs/zap_micro.c \
module/zfs/zcp.c \
module/zfs/zcp_get.c \
module/zfs/zcp_global.c \
module/zfs/zcp_iter.c \
module/zfs/zcp_set.c \
module/zfs/zcp_synctask.c \
module/zfs/zfeature.c \
module/zfs/zfs_byteswap.c \
module/zfs/zfs_chksum.c \
module/zfs/zfs_fm.c \
module/zfs/zfs_fuid.c \
module/zfs/zfs_ratelimit.c \
module/zfs/zfs_rlock.c \
module/zfs/zfs_sa.c \
module/zfs/zil.c \
module/zfs/zio.c \
module/zfs/zio_checksum.c \
module/zfs/zio_compress.c \
module/zfs/zio_inject.c \
module/zfs/zle.c \
module/zfs/zrlock.c \
module/zfs/zthr.c
libzpool_la_LIBADD = \
libicp.la \
libunicode.la \
libnvpair.la \
libzstd.la \
libzutil.la
libzpool_la_LIBADD += $(LIBCLOCK_GETTIME) $(ZLIB_LIBS) -ldl -lm
libzpool_la_LDFLAGS = -pthread
if !ASAN_ENABLED
libzpool_la_LDFLAGS += -Wl,-z,defs
endif
if BUILD_FREEBSD
libzpool_la_LIBADD += -lgeom
endif
libzpool_la_LDFLAGS += -version-info 5:0:0
if TARGET_CPU_POWERPC
module/zfs/libzpool_la-vdev_raidz_math_powerpc_altivec.$(OBJEXT) : CFLAGS += -maltivec
module/zfs/libzpool_la-vdev_raidz_math_powerpc_altivec.l$(OBJEXT): CFLAGS += -maltivec
endif
diff --git a/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_device_path_os.c b/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_device_path_os.c
index f081ef53da79..9f4c74f50f3c 100644
--- a/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_device_path_os.c
+++ b/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_device_path_os.c
@@ -1,692 +1,691 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/efi_partition.h>
#ifdef HAVE_LIBUDEV
#include <libudev.h>
#endif
#include <libzutil.h>
/*
* Append partition suffix to an otherwise fully qualified device path.
* This is used to generate the full path as it is stored in
* ZPOOL_CONFIG_PATH for whole disk devices. On success the new length
* of 'path' is returned; on error a negative value is returned.
*/
int
zfs_append_partition(char *path, size_t max_len)
{
int len = strlen(path);
if ((strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0) ||
(strncmp(path, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0)) {
if (len + 6 >= max_len)
return (-1);
(void) strcat(path, "-part1");
len += 6;
} else {
if (len + 2 >= max_len)
return (-1);
if (isdigit(path[len-1])) {
(void) strcat(path, "p1");
len += 2;
} else {
(void) strcat(path, "1");
len += 1;
}
}
return (len);
}
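/*
* Illustrative results (editor's sketch, not part of this change) showing
* which suffix is chosen depending on how the base name ends:
*
* "/dev/sda" (no trailing digit) becomes "/dev/sda1"
* "/dev/nvme0n1" (trailing digit) becomes "/dev/nvme0n1p1"
* paths under UDISK_ROOT or ZVOL_ROOT get "-part1" appended
*/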
/*
* Remove partition suffix from a vdev path. Partition suffixes may take three
* forms: "-partX", "pX", or "X", where X is a string of digits. The second
* case only occurs when the suffix is preceded by a digit, e.g. "md0p0". The
* third case only occurs when preceded by a string matching the regular
* expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
*
* The caller must free the returned string.
*/
char *
zfs_strip_partition(const char *path)
{
char *tmp = strdup(path);
char *part = NULL, *d = NULL;
if (!tmp)
return (NULL);
if ((part = strstr(tmp, "-part")) && part != tmp) {
d = part + 5;
} else if ((part = strrchr(tmp, 'p')) &&
part > tmp + 1 && isdigit(*(part-1))) {
d = part + 1;
} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
tmp[1] == 'd') {
for (d = &tmp[2]; isalpha(*d); part = ++d) { }
} else if (strncmp("xvd", tmp, 3) == 0) {
for (d = &tmp[3]; isalpha(*d); part = ++d) { }
}
if (part && d && *d != '\0') {
for (; isdigit(*d); d++) { }
if (*d == '\0')
*part = '\0';
}
return (tmp);
}
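/*
* Examples of the three suffix forms handled above (editor's sketch, not
* part of this change; each returned string must be freed):
*
* zfs_strip_partition("sdb-part2") returns "sdb"
* zfs_strip_partition("md0p1") returns "md0"
* zfs_strip_partition("sda1") returns "sda"
*/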
/*
* Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
*
* path: /dev/sda1
* returns: /dev/sda
*
* Returned string must be freed.
*/
static char *
zfs_strip_partition_path(const char *path)
{
char *newpath = strdup(path);
char *sd_offset;
char *new_sd;
if (!newpath)
return (NULL);
/* Point to "sda1" part of "/dev/sda1" */
sd_offset = strrchr(newpath, '/') + 1;
/* Get our new name "sda" */
new_sd = zfs_strip_partition(sd_offset);
if (!new_sd) {
free(newpath);
return (NULL);
}
/* Paste the "sda" where "sda1" was */
strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
/* Free temporary "sda" */
free(new_sd);
return (newpath);
}
/*
* Strip the unwanted portion of a device path.
*/
const char *
zfs_strip_path(const char *path)
{
size_t spath_count;
const char *const *spaths = zpool_default_search_paths(&spath_count);
for (size_t i = 0; i < spath_count; ++i)
if (strncmp(path, spaths[i], strlen(spaths[i])) == 0 &&
path[strlen(spaths[i])] == '/')
return (path + strlen(spaths[i]) + 1);
return (path);
}
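/*
* For example (editor's sketch, assuming "/dev" is among the defaults
* returned by zpool_default_search_paths()):
*
* zfs_strip_path("/dev/sda") returns "sda"
* zfs_strip_path("sda") returns "sda" (no search-path prefix to strip)
*/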
/*
* Read the contents of a sysfs file into an allocated buffer and remove the
* last newline.
*
* This is useful for reading sysfs files that return a single string. Return
* an allocated string pointer on success, NULL otherwise. Returned buffer
* must be freed by the user.
*/
static char *
zfs_read_sysfs_file(char *filepath)
{
char buf[4096]; /* all sysfs files report 4k size */
char *str = NULL;
FILE *fp = fopen(filepath, "r");
if (fp == NULL) {
return (NULL);
}
if (fgets(buf, sizeof (buf), fp) == buf) {
/* success */
/* Remove the last newline (if any) */
size_t len = strlen(buf);
if (buf[len - 1] == '\n') {
buf[len - 1] = '\0';
}
str = strdup(buf);
}
fclose(fp);
return (str);
}
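/*
* Illustrative call (editor's sketch, not part of this change), matching
* the sysfs attribute read by zfs_get_pci_slots_sys_path() below:
*
* char *addr = zfs_read_sysfs_file("/sys/block/nvme0n1/device/address");
* ... addr is e.g. "0000:01:00.0" with the newline stripped; free(addr);
*/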
/*
* Given a dev name like "nvme0n1", return the full PCI slot sysfs path to
* the drive (in /sys/bus/pci/slots).
*
* For example:
* dev: "nvme0n1"
* returns: "/sys/bus/pci/slots/0"
*
* 'dev' must be an NVMe device.
*
* Returned string must be freed. Returns NULL on error or no sysfs path.
*/
static char *
zfs_get_pci_slots_sys_path(const char *dev_name)
{
DIR *dp = NULL;
struct dirent *ep;
char *address1 = NULL;
char *address2 = NULL;
char *path = NULL;
char buf[MAXPATHLEN];
char *tmp;
/* If they preface 'dev' with a path (like "/dev") then strip it off */
tmp = strrchr(dev_name, '/');
if (tmp != NULL)
dev_name = tmp + 1; /* +1 since we want the chr after '/' */
if (strncmp("nvme", dev_name, 4) != 0)
return (NULL);
(void) snprintf(buf, sizeof (buf), "/sys/block/%s/device/address",
dev_name);
address1 = zfs_read_sysfs_file(buf);
if (!address1)
return (NULL);
/*
* /sys/block/nvme0n1/device/address format will
* be "0000:01:00.0" while /sys/bus/pci/slots/0/address will be
* "0000:01:00". Just NULL terminate at the '.' so they match.
*/
tmp = strrchr(address1, '.');
if (tmp != NULL)
*tmp = '\0';
dp = opendir("/sys/bus/pci/slots/");
if (dp == NULL) {
free(address1);
return (NULL);
}
/*
* Look through all the /sys/bus/pci/slots/ subdirs
*/
while ((ep = readdir(dp))) {
/*
* We only care about directory names that are a single number.
* Sometimes there's other directories like
* "/sys/bus/pci/slots/0-3/" in there - skip those.
*/
if (!zfs_isnumber(ep->d_name))
continue;
(void) snprintf(buf, sizeof (buf),
"/sys/bus/pci/slots/%s/address", ep->d_name);
address2 = zfs_read_sysfs_file(buf);
if (!address2)
continue;
if (strcmp(address1, address2) == 0) {
/* Addresses match, we're all done */
free(address2);
if (asprintf(&path, "/sys/bus/pci/slots/%s",
ep->d_name) == -1) {
- free(tmp);
continue;
}
break;
}
free(address2);
}
closedir(dp);
free(address1);
return (path);
}
/*
* Given a dev name like "sda", return the full enclosure sysfs path to
* the disk. You can also pass in the name with "/dev" prepended
* to it (like /dev/sda). This works for both JBODs and NVMe PCI devices.
*
* For example, disk "sda" in enclosure slot 1:
* dev_name: "sda"
* returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
*
* Or:
*
* dev_name: "nvme0n1"
* returns: "/sys/bus/pci/slots/0"
*
* 'dev' must be a non-devicemapper device.
*
* Returned string must be freed. Returns NULL on error.
*/
char *
zfs_get_enclosure_sysfs_path(const char *dev_name)
{
DIR *dp = NULL;
struct dirent *ep;
char buf[MAXPATHLEN];
char *tmp1 = NULL;
char *tmp2 = NULL;
char *tmp3 = NULL;
char *path = NULL;
size_t size;
int tmpsize;
if (dev_name == NULL)
return (NULL);
/* If they preface 'dev' with a path (like "/dev") then strip it off */
tmp1 = strrchr(dev_name, '/');
if (tmp1 != NULL)
dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
if (tmpsize == -1 || tmp1 == NULL) {
tmp1 = NULL;
goto end;
}
dp = opendir(tmp1);
if (dp == NULL)
goto end;
/*
* Look though all sysfs entries in /sys/block/<dev>/device for
* the enclosure symlink.
*/
while ((ep = readdir(dp))) {
/* Ignore everything that's not our enclosure_device link */
if (strstr(ep->d_name, "enclosure_device") == NULL)
continue;
if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1) {
tmp2 = NULL;
break;
}
size = readlink(tmp2, buf, sizeof (buf));
/* Did readlink fail or crop the link name? */
if (size == -1 || size >= sizeof (buf))
break;
/*
* We got a valid link. readlink() doesn't terminate strings
* so we have to do it.
*/
buf[size] = '\0';
/*
* Our link will look like:
*
* "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
*
* We want to grab the "enclosure/1:0:3:0/SLOT 1" part
*/
tmp3 = strstr(buf, "enclosure");
if (tmp3 == NULL)
break;
if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
/* If asprintf() fails, 'path' is undefined */
path = NULL;
break;
}
if (path == NULL)
break;
}
end:
free(tmp2);
free(tmp1);
if (dp != NULL)
closedir(dp);
if (!path) {
/*
* This particular disk isn't in a JBOD. It could be an NVMe
* drive. If so, look up the NVMe device's path in
* /sys/bus/pci/slots/. Within that directory is a 'attention'
* file which controls the NVMe fault LED.
*/
path = zfs_get_pci_slots_sys_path(dev_name);
}
return (path);
}
/*
* Allocate and return the underlying device name for a device mapper device.
*
* For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
* DM device (like /dev/disk/by-vdev/A0) are also allowed.
*
* If the DM device has multiple underlying devices (like with multipath
* DM devices), then favor underlying devices that have a symlink back to
* their enclosure device in sysfs. This will be useful for the
* zedlet scripts that toggle the fault LED.
*
* Returns an underlying device name, or NULL on error or no match. If dm_name
* is not a DM device then return NULL.
*
* NOTE: The returned name string must be *freed*.
*/
static char *
dm_get_underlying_path(const char *dm_name)
{
DIR *dp = NULL;
struct dirent *ep;
char *realp;
char *tmp = NULL;
char *path = NULL;
char *dev_str;
int size;
char *first_path = NULL;
char *enclosure_path;
if (dm_name == NULL)
return (NULL);
/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
realp = realpath(dm_name, NULL);
if (realp == NULL)
return (NULL);
/*
* If they preface 'dev' with a path (like "/dev") then strip it off.
* We just want the 'dm-N' part.
*/
tmp = strrchr(realp, '/');
if (tmp != NULL)
dev_str = tmp + 1; /* +1 since we want the chr after '/' */
else
dev_str = tmp;
if ((size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str)) == -1) {
tmp = NULL;
goto end;
}
dp = opendir(tmp);
if (dp == NULL)
goto end;
/*
* A device-mapper device can have multiple paths to it (multipath).
* Favor paths that have a symlink back to their enclosure device.
* We have to do this since some enclosures may only provide a symlink
* back for one underlying path to a disk and not the other.
*
* If no paths have links back to their enclosure, then just return the
* first path.
*/
while ((ep = readdir(dp))) {
if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
if (!first_path)
first_path = strdup(ep->d_name);
enclosure_path =
zfs_get_enclosure_sysfs_path(ep->d_name);
if (!enclosure_path)
continue;
if ((size = asprintf(
&path, "/dev/%s", ep->d_name)) == -1)
path = NULL;
free(enclosure_path);
break;
}
}
end:
if (dp != NULL)
closedir(dp);
free(tmp);
free(realp);
if (!path && first_path) {
/*
* None of the underlying paths had a link back to their
* enclosure devices. Throw up our hands and return the first
* underlying path.
*/
if ((size = asprintf(&path, "/dev/%s", first_path)) == -1)
path = NULL;
}
free(first_path);
return (path);
}
/*
* Return B_TRUE if device is a device mapper or multipath device.
* Return B_FALSE if not.
*/
boolean_t
zfs_dev_is_dm(const char *dev_name)
{
char *tmp;
tmp = dm_get_underlying_path(dev_name);
if (tmp == NULL)
return (B_FALSE);
free(tmp);
return (B_TRUE);
}
/*
* By "whole disk" we mean an entire physical disk (something we can
* label, toggle the write cache on, etc.) as opposed to the full
* capacity of a pseudo-device such as lofi or did. We act as if we
* are labeling the disk, which should be a pretty good test of whether
* it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
* it isn't.
*/
boolean_t
zfs_dev_is_whole_disk(const char *dev_name)
{
struct dk_gpt *label = NULL;
int fd;
if ((fd = open(dev_name, O_RDONLY | O_DIRECT | O_CLOEXEC)) < 0)
return (B_FALSE);
if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
(void) close(fd);
return (B_FALSE);
}
efi_free(label);
(void) close(fd);
return (B_TRUE);
}
/*
* Lookup the underlying device for a device name
*
* Often you'll have a symlink to a device, a partition device,
* or a multipath device, and want to look up the underlying device.
* This function returns the underlying device name. If the device
* name is already the underlying device, then just return the same
* name. If the device is a DM device with multiple underlying devices
* then return the first one.
*
* For example:
*
* 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
* dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
* returns: /dev/sda
*
* 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
* dev_name: /dev/mapper/mpatha
* returns: /dev/sda (first device)
*
* 3. /dev/sda (already the underlying device)
* dev_name: /dev/sda
* returns: /dev/sda
*
* 4. /dev/dm-3 (mapped to /dev/sda)
* dev_name: /dev/dm-3
* returns: /dev/sda
*
* 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
* dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
* returns: /dev/sdb
*
* 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
* dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
* returns: /dev/sda
*
* Returns underlying device name, or NULL on error or no match.
*
* NOTE: The returned name string must be *freed*.
*/
char *
zfs_get_underlying_path(const char *dev_name)
{
char *name = NULL;
char *tmp;
if (dev_name == NULL)
return (NULL);
tmp = dm_get_underlying_path(dev_name);
/* dev_name not a DM device, so just un-symlinkize it */
if (tmp == NULL)
tmp = realpath(dev_name, NULL);
if (tmp != NULL) {
name = zfs_strip_partition_path(tmp);
free(tmp);
}
return (name);
}
#ifdef HAVE_LIBUDEV
/*
* A disk is considered a multipath whole disk when:
* DEVNAME key value has "dm-"
* DM_UUID key exists and starts with 'mpath-'
* ID_PART_TABLE_TYPE key does not exist or is not gpt
* ID_FS_LABEL key does not exist (disk isn't labeled)
*/
static boolean_t
is_mpath_udev_sane(struct udev_device *dev)
{
const char *devname, *type, *uuid, *label;
devname = udev_device_get_property_value(dev, "DEVNAME");
type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE");
uuid = udev_device_get_property_value(dev, "DM_UUID");
label = udev_device_get_property_value(dev, "ID_FS_LABEL");
if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) &&
((type == NULL) || (strcmp(type, "gpt") != 0)) &&
((uuid != NULL) && (strncmp(uuid, "mpath-", 6) == 0)) &&
(label == NULL)) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check if a disk is a multipath "blank" disk:
*
* 1. The disk has udev values that suggest it's a multipath disk
* 2. The disk is not currently labeled with a filesystem of any type
* 3. There are no partitions on the disk
*/
boolean_t
is_mpath_whole_disk(const char *path)
{
struct udev *udev;
struct udev_device *dev = NULL;
char nodepath[MAXPATHLEN];
char *sysname;
if (realpath(path, nodepath) == NULL)
return (B_FALSE);
sysname = strrchr(nodepath, '/') + 1;
if (strncmp(sysname, "dm-", 3) != 0)
return (B_FALSE);
if ((udev = udev_new()) == NULL)
return (B_FALSE);
if ((dev = udev_device_new_from_subsystem_sysname(udev, "block",
sysname)) == NULL) {
udev_device_unref(dev);
return (B_FALSE);
}
/* Sanity check some udev values */
boolean_t is_sane = is_mpath_udev_sane(dev);
udev_device_unref(dev);
return (is_sane);
}
#else /* HAVE_LIBUDEV */
boolean_t
is_mpath_whole_disk(const char *path)
{
(void) path;
return (B_FALSE);
}
#endif /* HAVE_LIBUDEV */
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_import.c b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
index 0bbf232f24a4..252b0bac685c 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_import.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
@@ -1,1937 +1,1939 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* Pool import support functions.
*
* Used by zpool, ztest, zdb, and zhack to locate importable configs. Since
* these commands are expected to run in the global zone, we can assume
* that the devices are all readable when called.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
#ifdef HAVE_AIO_H
#include <aio.h>
#endif
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/dktp/fdisk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <thread_pool.h>
#include <libzutil.h>
#include <libnvpair.h>
#include "zutil_import.h"
static __attribute__((format(printf, 2, 3))) void
zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->lpc_desc, sizeof (hdl->lpc_desc), fmt, ap);
hdl->lpc_desc_active = B_TRUE;
va_end(ap);
}
static void
zutil_verror(libpc_handle_t *hdl, const char *error, const char *fmt,
va_list ap)
{
char action[1024];
(void) vsnprintf(action, sizeof (action), fmt, ap);
if (hdl->lpc_desc_active)
hdl->lpc_desc_active = B_FALSE;
else
hdl->lpc_desc[0] = '\0';
if (hdl->lpc_printerr) {
if (hdl->lpc_desc[0] != '\0')
error = hdl->lpc_desc;
(void) fprintf(stderr, "%s: %s\n", action, error);
}
}
static __attribute__((format(printf, 3, 4))) int
zutil_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zutil_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zutil_error(libpc_handle_t *hdl, const char *error, const char *msg)
{
return (zutil_error_fmt(hdl, error, "%s", msg));
}
static int
zutil_no_memory(libpc_handle_t *hdl)
{
zutil_error(hdl, EZFS_NOMEM, "internal error");
exit(1);
}
void *
zutil_alloc(libpc_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) zutil_no_memory(hdl);
return (data);
}
char *
zutil_strdup(libpc_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
static char *
zutil_strndup(libpc_handle_t *hdl, const char *str, size_t n)
{
char *ret;
if ((ret = strndup(str, n)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
/*
* Intermediate structures used to gather configuration information.
*/
typedef struct config_entry {
uint64_t ce_txg;
nvlist_t *ce_config;
struct config_entry *ce_next;
} config_entry_t;
typedef struct vdev_entry {
uint64_t ve_guid;
config_entry_t *ve_configs;
struct vdev_entry *ve_next;
} vdev_entry_t;
typedef struct pool_entry {
uint64_t pe_guid;
vdev_entry_t *pe_vdevs;
struct pool_entry *pe_next;
} pool_entry_t;
typedef struct name_entry {
char *ne_name;
uint64_t ne_guid;
uint64_t ne_order;
uint64_t ne_num_labels;
struct name_entry *ne_next;
} name_entry_t;
typedef struct pool_list {
pool_entry_t *pools;
name_entry_t *names;
} pool_list_t;
/*
* Go through and fix up any path and/or devid information for the given vdev
* configuration.
*/
static int
fix_paths(libpc_handle_t *hdl, nvlist_t *nv, name_entry_t *names)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
name_entry_t *ne, *best;
char *path;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (fix_paths(hdl, child[c], names) != 0)
return (-1);
return (0);
}
/*
* This is a leaf (file or disk) vdev. In either case, go through
* the name list and see if we find a matching guid. If so, replace
* the path and see if we can calculate a new devid.
*
* There may be multiple names associated with a particular guid, in
* which case we have overlapping partitions or multiple paths to the
* same disk. In this case we prefer to use the path name which
* matches the ZPOOL_CONFIG_PATH. If no matching entry is found we
* use the lowest order device which corresponds to the first match
* while traversing the ZPOOL_IMPORT_PATH search path.
*/
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
path = NULL;
best = NULL;
for (ne = names; ne != NULL; ne = ne->ne_next) {
if (ne->ne_guid == guid) {
if (path == NULL) {
best = ne;
break;
}
if ((strlen(path) == strlen(ne->ne_name)) &&
strncmp(path, ne->ne_name, strlen(path)) == 0) {
best = ne;
break;
}
if (best == NULL) {
best = ne;
continue;
}
/* Prefer paths with more vdev labels. */
if (ne->ne_num_labels > best->ne_num_labels) {
best = ne;
continue;
}
/* Prefer paths earlier in the search order. */
if (ne->ne_num_labels == best->ne_num_labels &&
ne->ne_order < best->ne_order) {
best = ne;
continue;
}
}
}
if (best == NULL)
return (0);
if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
return (-1);
update_vdev_config_dev_strs(nv);
return (0);
}
/*
* Add the given configuration to the list of known devices.
*/
static int
add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path,
int order, int num_labels, nvlist_t *config)
{
uint64_t pool_guid, vdev_guid, top_guid, txg, state;
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
name_entry_t *ne;
/*
* If this is a hot spare not currently in use or level 2 cache
* device, add it to the list of names to translate, but don't do
* anything else.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) == 0 &&
(state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
/*
* If we have a valid config but cannot read any of these fields, then
* it means we have a half-initialized label. In vdev_label_init()
* we write a label with txg == 0 so that we can identify the device
* in case the user refers to the same disk later on. If we fail to
* create the pool, we'll be left with a label in this state
* which should not be considered part of a valid pool.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0) {
return (0);
}
/*
* First, see if we know about this pool. If not, then add it to the
* list of known pools.
*/
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
if (pe->pe_guid == pool_guid)
break;
}
if (pe == NULL) {
if ((pe = zutil_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
return (-1);
}
pe->pe_guid = pool_guid;
pe->pe_next = pl->pools;
pl->pools = pe;
}
/*
* Second, see if we know about this toplevel vdev. Add it if it's
* missing.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
if (ve->ve_guid == top_guid)
break;
}
if (ve == NULL) {
if ((ve = zutil_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
return (-1);
}
ve->ve_guid = top_guid;
ve->ve_next = pe->pe_vdevs;
pe->pe_vdevs = ve;
}
/*
* Third, see if we have a config with a matching transaction group. If
* so, then we do nothing. Otherwise, add it to the list of known
* configs.
*/
for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
if (ce->ce_txg == txg)
break;
}
if (ce == NULL) {
if ((ce = zutil_alloc(hdl, sizeof (config_entry_t))) == NULL) {
return (-1);
}
ce->ce_txg = txg;
ce->ce_config = fnvlist_dup(config);
ce->ce_next = ve->ve_configs;
ve->ve_configs = ce;
}
/*
* At this point we've successfully added our config to the list of
* known configs. The last thing to do is add the vdev guid -> path
* mappings so that we can fix up the configuration as necessary before
* doing the import.
*/
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
static int
zutil_pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid,
boolean_t *isactive)
{
ASSERT(hdl->lpc_ops->pco_pool_active != NULL);
int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name,
guid, isactive);
return (error);
}
static nvlist_t *
zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig)
{
ASSERT(hdl->lpc_ops->pco_refresh_config != NULL);
return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle,
tryconfig));
}
/*
* Determine if the vdev id is a hole in the namespace.
*/
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
int c;
for (c = 0; c < holes; c++) {
/* Top-level is a hole */
if (hole_array[c] == id)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Convert our list of pools into the definitive set of configurations. We
* start by picking the best config for each toplevel vdev. Once that's done,
* we assemble the toplevel vdevs into a full config for the pool. We make a
* pass to fix up any incorrect paths, and then add it to the main list to
* return to the user.
*/
static nvlist_t *
get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
nvlist_t *policy)
{
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
nvlist_t **spares, **l2cache;
uint_t i, nspares, nl2cache;
boolean_t config_seen;
uint64_t best_txg;
char *name, *hostname = NULL;
uint64_t guid;
uint_t children = 0;
nvlist_t **child = NULL;
uint_t holes;
uint64_t *hole_array, max_id;
uint_t c;
boolean_t isactive;
uint64_t hostid;
nvlist_t *nvl;
boolean_t valid_top_config = B_FALSE;
if (nvlist_alloc(&ret, 0, 0) != 0)
goto nomem;
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
uint64_t id, max_txg = 0;
if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
config_seen = B_FALSE;
/*
* Iterate over all toplevel vdevs. Grab the pool configuration
* from the first one we find, and then go through the rest and
* add them as necessary to the 'vdevs' member of the config.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
/*
* Determine the best configuration for this vdev by
* selecting the config with the latest transaction
* group.
*/
best_txg = 0;
for (ce = ve->ve_configs; ce != NULL;
ce = ce->ce_next) {
if (ce->ce_txg > best_txg) {
tmp = ce->ce_config;
best_txg = ce->ce_txg;
}
}
/*
* We rely on the fact that the max txg for the
* pool will contain the most up-to-date information
* about the valid top-levels in the vdev namespace.
*/
if (best_txg > max_txg) {
(void) nvlist_remove(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
DATA_TYPE_UINT64);
(void) nvlist_remove(config,
ZPOOL_CONFIG_HOLE_ARRAY,
DATA_TYPE_UINT64_ARRAY);
max_txg = best_txg;
hole_array = NULL;
holes = 0;
max_id = 0;
valid_top_config = B_FALSE;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
verify(nvlist_add_uint64(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
max_id) == 0);
valid_top_config = B_TRUE;
}
if (nvlist_lookup_uint64_array(tmp,
ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
&holes) == 0) {
verify(nvlist_add_uint64_array(config,
ZPOOL_CONFIG_HOLE_ARRAY,
hole_array, holes) == 0);
}
}
if (!config_seen) {
/*
* Copy the relevant pieces of data to the pool
* configuration:
*
* version
* pool guid
* name
* comment (if available)
* compatibility features (if available)
* pool state
* hostid (if available)
* hostname (if available)
*/
uint64_t state, version;
char *comment = NULL;
char *compatibility = NULL;
version = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VERSION);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_VERSION, version);
guid = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_GUID);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_GUID, guid);
name = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_POOL_NAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_POOL_NAME, name);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMMENT, &comment) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMMENT, comment);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMPATIBILITY,
compatibility);
state = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_STATE);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_STATE, state);
hostid = 0;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
fnvlist_add_uint64(config,
ZPOOL_CONFIG_HOSTID, hostid);
hostname = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_HOSTNAME, hostname);
}
config_seen = B_TRUE;
}
/*
* Add this top-level vdev to the child array.
*/
verify(nvlist_lookup_nvlist(tmp,
ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
&id) == 0);
if (id >= children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (id + 1) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = id + 1;
}
if (nvlist_dup(nvtop, &child[id], 0) != 0)
goto nomem;
}
/*
* If we have information about all the top-levels then
* clean up the nvlist which we've constructed. This
* means removing any extraneous devices that are
* beyond the valid range or adding devices to the end
* of our array which appear to be missing.
*/
if (valid_top_config) {
if (max_id < children) {
for (c = max_id; c < children; c++)
nvlist_free(child[c]);
children = max_id;
} else if (max_id > children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (max_id) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = max_id;
}
}
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
/*
* The vdev namespace may contain holes as a result of
* device removal. We must add them back into the vdev
* tree before we process any missing devices.
*/
if (holes > 0) {
ASSERT(valid_top_config);
for (c = 0; c < children; c++) {
nvlist_t *holey;
if (child[c] != NULL ||
!vdev_is_hole(hole_array, holes, c))
continue;
if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
/*
* Holes in the namespace are treated as
* "hole" top-level vdevs and have a
* special flag set on them.
*/
if (nvlist_add_string(holey,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(holey);
goto nomem;
}
child[c] = holey;
}
}
/*
* Look for any missing top-level vdevs. If this is the case,
* create a faked up 'missing' vdev as a placeholder. We cannot
* simply compress the child array, because the kernel performs
* certain checks to make sure the vdev IDs match their location
* in the configuration.
*/
for (c = 0; c < children; c++) {
if (child[c] == NULL) {
nvlist_t *missing;
if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
if (nvlist_add_string(missing,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MISSING) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(missing);
goto nomem;
}
child[c] = missing;
}
}
/*
* Put all of this pool's top-level vdevs into a root vdev.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, children) != 0) {
nvlist_free(nvroot);
goto nomem;
}
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
children = 0;
child = NULL;
/*
* Go through and fix up any paths and/or devids based on our
* known list of vdev GUID -> path mappings.
*/
if (fix_paths(hdl, nvroot, pl->names) != 0) {
nvlist_free(nvroot);
goto nomem;
}
/*
* Add the root vdev to this pool's configuration.
*/
if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
nvroot) != 0) {
nvlist_free(nvroot);
goto nomem;
}
nvlist_free(nvroot);
/*
* zdb uses this path to report on active pools that were
* imported or created using -R.
*/
if (active_ok)
goto add_pool;
/*
* Determine if this pool is currently active, in which case we
* can't actually import it.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
if (zutil_pool_active(hdl, name, guid, &isactive) != 0)
goto error;
if (isactive) {
nvlist_free(config);
config = NULL;
continue;
}
if (policy != NULL) {
if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
policy) != 0)
goto nomem;
}
if ((nvl = zutil_refresh_config(hdl, config)) == NULL) {
nvlist_free(config);
config = NULL;
continue;
}
nvlist_free(config);
config = nvl;
/*
* Go through and update the paths for spares, now that we have
* them.
*/
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
if (fix_paths(hdl, spares[i], pl->names) != 0)
goto nomem;
}
}
/*
* Update the paths for l2cache devices.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
for (i = 0; i < nl2cache; i++) {
if (fix_paths(hdl, l2cache[i], pl->names) != 0)
goto nomem;
}
}
/*
* Restore the original information read from the actual label.
*/
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
DATA_TYPE_UINT64);
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
DATA_TYPE_STRING);
if (hostid != 0) {
verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
hostid) == 0);
verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
hostname) == 0);
}
add_pool:
/*
* Add this pool to the list of configs.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
if (nvlist_add_nvlist(ret, name, config) != 0)
goto nomem;
nvlist_free(config);
config = NULL;
}
return (ret);
nomem:
(void) zutil_no_memory(hdl);
error:
nvlist_free(config);
nvlist_free(ret);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
return (NULL);
}
/*
* Return the offset of the given label.
*/
static uint64_t
label_offset(uint64_t size, int l)
{
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
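/*
* Worked out for the usual layout (VDEV_LABELS is 4, so two labels sit at
* the front of the device and two at the back):
*
* label_offset(size, 0) == 0
* label_offset(size, 1) == sizeof (vdev_label_t)
* label_offset(size, 2) == size - 2 * sizeof (vdev_label_t)
* label_offset(size, 3) == size - sizeof (vdev_label_t)
*/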
/*
* The same description as for zpool_read_label() below applies, except
* here we do it without AIO, presumably because an AIO call errored out
* in a way that avoiding AIO might circumvent.
*/
static int
zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
{
struct stat64 statbuf;
int l, count = 0;
vdev_phys_t *label;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
if (error)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
if (pread64(fd, label, sizeof (vdev_phys_t),
offset) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(label->vp_nvlist,
sizeof (label->vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
free(label);
*config = expected_config;
return (0);
}
/*
* Given a file descriptor, read the label information and return an nvlist
* describing the configuration, if there is one. The number of valid
* labels found will be returned in num_labels when non-NULL.
*/
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
#ifndef HAVE_AIO_H
return (zpool_read_label_slow(fd, config, num_labels));
#else
struct stat64 statbuf;
struct aiocb aiocbs[VDEV_LABELS];
struct aiocb *aiocbps[VDEV_LABELS];
vdev_phys_t *labels;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error, l, count = 0;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
error = posix_memalign((void **)&labels, PAGESIZE,
VDEV_LABELS * sizeof (*labels));
if (error)
return (-1);
memset(aiocbs, 0, sizeof (aiocbs));
for (l = 0; l < VDEV_LABELS; l++) {
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
aiocbs[l].aio_fildes = fd;
aiocbs[l].aio_offset = offset;
aiocbs[l].aio_buf = &labels[l];
aiocbs[l].aio_nbytes = sizeof (vdev_phys_t);
aiocbs[l].aio_lio_opcode = LIO_READ;
aiocbps[l] = &aiocbs[l];
}
if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
int saved_errno = errno;
boolean_t do_slow = B_FALSE;
error = -1;
if (errno == EAGAIN || errno == EINTR || errno == EIO) {
/*
* A portion of the requests may have been submitted.
* Clean them up.
*/
for (l = 0; l < VDEV_LABELS; l++) {
errno = 0;
switch (aio_error(&aiocbs[l])) {
case EINVAL:
break;
case EINPROGRESS:
// This shouldn't be possible to
// encounter, die if we do.
ASSERT(B_FALSE);
zfs_fallthrough;
case EOPNOTSUPP:
case ENOSYS:
do_slow = B_TRUE;
zfs_fallthrough;
case 0:
default:
(void) aio_return(&aiocbs[l]);
}
}
}
if (do_slow) {
/*
* At least some of the I/O touched files that are unsafe
* for AIO. Let's try again, without AIO this time.
*/
error = zpool_read_label_slow(fd, config, num_labels);
saved_errno = errno;
}
free(labels);
errno = saved_errno;
return (error);
}
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
if (aio_return(&aiocbs[l]) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(labels[l].vp_nvlist,
sizeof (labels[l].vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
free(labels);
*config = expected_config;
return (0);
#endif
}
/*
* Sorted by full path and then vdev guid to allow for multiple entries with
* the same full path name. This is required because it's possible to
* have multiple block devices with labels that refer to the same
* ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both
* entries need to be added to the cache. Scenarios where this can occur
* include overwritten pool labels, devices which are visible from multiple
* hosts and multipath devices.
*/
int
slice_cache_compare(const void *arg1, const void *arg2)
{
const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
uint64_t guid1 = ((rdsk_node_t *)arg1)->rn_vdev_guid;
uint64_t guid2 = ((rdsk_node_t *)arg2)->rn_vdev_guid;
int rv;
rv = TREE_ISIGN(strcmp(nm1, nm2));
if (rv)
return (rv);
return (TREE_CMP(guid1, guid2));
}
static int
label_paths_impl(libpc_handle_t *hdl, nvlist_t *nvroot, uint64_t pool_guid,
uint64_t vdev_guid, char **path, char **devid)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
char *val;
int error;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
error = label_paths_impl(hdl, child[c],
pool_guid, vdev_guid, path, devid);
if (error)
return (error);
}
return (0);
}
if (nvroot == NULL)
return (0);
error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
if ((error != 0) || (guid != vdev_guid))
return (0);
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &val);
if (error == 0)
*path = val;
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &val);
if (error == 0)
*devid = val;
return (0);
}
/*
* Given a disk label fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID
* and store these strings as config_path and devid_path respectively.
* The returned pointers are only valid as long as label remains valid.
*/
int
label_paths(libpc_handle_t *hdl, nvlist_t *label, char **path, char **devid)
{
nvlist_t *nvroot;
uint64_t pool_guid;
uint64_t vdev_guid;
*path = NULL;
*devid = NULL;
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
return (ENOENT);
return (label_paths_impl(hdl, nvroot, pool_guid, vdev_guid, path,
devid));
}
static void
zpool_find_import_scan_add_slice(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *path, const char *name, int order)
{
avl_index_t where;
rdsk_node_t *slice;
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
if (asprintf(&slice->rn_name, "%s/%s", path, name) == -1) {
free(slice);
return;
}
slice->rn_vdev_guid = 0;
slice->rn_lock = lock;
slice->rn_avl = cache;
slice->rn_hdl = hdl;
slice->rn_order = order + IMPORT_ORDER_SCAN_OFFSET;
slice->rn_labelpaths = B_FALSE;
pthread_mutex_lock(lock);
if (avl_find(cache, slice, &where)) {
free(slice->rn_name);
free(slice);
} else {
avl_insert(cache, slice, where);
}
pthread_mutex_unlock(lock);
}
static int
zpool_find_import_scan_dir(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error;
char path[MAXPATHLEN];
struct dirent64 *dp;
DIR *dirp;
if (realpath(dir, path) == NULL) {
error = errno;
if (error == ENOENT)
return (0);
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir);
return (error);
}
dirp = opendir(path);
if (dirp == NULL) {
error = errno;
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
return (error);
}
while ((dp = readdir64(dirp)) != NULL) {
const char *name = dp->d_name;
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
continue;
switch (dp->d_type) {
case DT_UNKNOWN:
case DT_BLK:
case DT_LNK:
#ifdef __FreeBSD__
case DT_CHR:
#endif
case DT_REG:
break;
default:
continue;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name,
order);
}
(void) closedir(dirp);
return (0);
}
static int
zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error = 0;
char path[MAXPATHLEN];
char *d = NULL;
ssize_t dl;
const char *dpath, *name;
/*
* Separate the directory and the basename.
* We do this so that we can get the realpath of
* the directory. We don't get the realpath on the
* whole path because if it's a symlink, we want the
* path of the symlink, not where it points to.
*/
name = zfs_basename(dir);
if ((dl = zfs_dirnamelen(dir)) == -1)
dpath = ".";
else
dpath = d = zutil_strndup(hdl, dir, dl);
if (realpath(dpath, path) == NULL) {
error = errno;
if (error == ENOENT) {
error = 0;
goto out;
}
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir);
goto out;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name, order);
out:
free(d);
return (error);
}
/*
* Scan a list of directories for zfs devices.
*/
static int
zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t **slice_cache, const char * const *dir, size_t dirs)
{
avl_tree_t *cache;
rdsk_node_t *slice;
void *cookie;
int i, error;
*slice_cache = NULL;
cache = zutil_alloc(hdl, sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare, sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
for (i = 0; i < dirs; i++) {
struct stat sbuf;
if (stat(dir[i], &sbuf) != 0) {
error = errno;
if (error == ENOENT)
continue;
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir[i]);
goto error;
}
/*
* If dir[i] is a directory, we walk through it and add all
* the entries to the cache. If it's not a directory, we just
* add it to the cache.
*/
if (S_ISDIR(sbuf.st_mode)) {
if ((error = zpool_find_import_scan_dir(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
} else {
if ((error = zpool_find_import_scan_path(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
}
}
*slice_cache = cache;
return (0);
error:
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
free(slice->rn_name);
free(slice);
}
free(cache);
return (error);
}
/*
* Given a list of directories to search, find all pools stored on disk. This
* includes partial pools which are not available to import. If no args are
* given (argc is 0), then the default directory (/dev/dsk) is searched.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
pthread_mutex_t *lock, avl_tree_t *cache)
{
(void) lock;
nvlist_t *ret = NULL;
pool_list_t pools = { 0 };
pool_entry_t *pe, *penext;
vdev_entry_t *ve, *venext;
config_entry_t *ce, *cenext;
name_entry_t *ne, *nenext;
rdsk_node_t *slice;
void *cookie;
tpool_t *t;
verify(iarg->poolname == NULL || iarg->guid == 0);
/*
* Create a thread pool to parallelize the process of reading and
* validating labels; a large number of threads can be used due to
* minimal contention.
*/
t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
for (slice = avl_first(cache); slice;
(slice = avl_walk(cache, slice, AVL_AFTER)))
(void) tpool_dispatch(t, zpool_open_func, slice);
tpool_wait(t);
tpool_destroy(t);
/*
* Process the cache, filtering out any entries which are not
* for the specified pool then adding matching label configs.
*/
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
if (slice->rn_config != NULL) {
nvlist_t *config = slice->rn_config;
boolean_t matched = B_TRUE;
boolean_t aux = B_FALSE;
int fd;
/*
* Check if it's a spare or l2cache device. If it is,
* we need to skip the name and guid check since they
* don't exist on an aux device label.
*/
if (iarg->poolname != NULL || iarg->guid != 0) {
uint64_t state;
aux = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &state) == 0 &&
(state == POOL_STATE_SPARE ||
state == POOL_STATE_L2CACHE);
}
if (iarg->poolname != NULL && !aux) {
char *pname;
matched = nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &pname) == 0 &&
strcmp(iarg->poolname, pname) == 0;
} else if (iarg->guid != 0 && !aux) {
uint64_t this_guid;
matched = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &this_guid) == 0 &&
iarg->guid == this_guid;
}
if (matched) {
/*
* Verify all remaining entries can be opened
* exclusively. This will prune all underlying
* multipath devices which otherwise could
* result in the vdev appearing as UNAVAIL.
*
* Under zdb, this step isn't required and
* would prevent a zdb -e of active pools with
* no cachefile.
*/
fd = open(slice->rn_name,
O_RDONLY | O_EXCL | O_CLOEXEC);
if (fd >= 0 || iarg->can_be_active) {
if (fd >= 0)
close(fd);
add_config(hdl, &pools,
slice->rn_name, slice->rn_order,
slice->rn_num_labels, config);
}
}
nvlist_free(config);
}
free(slice->rn_name);
free(slice);
}
avl_destroy(cache);
free(cache);
ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);
for (pe = pools.pools; pe != NULL; pe = penext) {
penext = pe->pe_next;
for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
venext = ve->ve_next;
for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
cenext = ce->ce_next;
nvlist_free(ce->ce_config);
free(ce);
}
free(ve);
}
free(pe);
}
for (ne = pools.names; ne != NULL; ne = nenext) {
nenext = ne->ne_next;
free(ne->ne_name);
free(ne);
}
return (ret);
}
/*
* Given a config, discover the paths for the devices which
* exist in the config.
*/
static int
discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv,
avl_tree_t *cache, pthread_mutex_t *lock)
{
char *path = NULL;
ssize_t dl;
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (int c = 0; c < children; c++) {
discover_cached_paths(hdl, child[c], cache, lock);
}
}
/*
* Once we have the path, we need to add the directory to
* our directory cache.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
if ((dl = zfs_dirnamelen(path)) == -1)
path = (char *)".";
else
path[dl] = '\0';
return (zpool_find_import_scan_dir(hdl, lock, cache,
path, 0));
}
return (0);
}
/*
* Given a cache file, return the contents as a list of importable pools.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_cached(libpc_handle_t *hdl, importargs_t *iarg)
{
char *buf;
int fd;
struct stat64 statbuf;
nvlist_t *raw, *src, *dst;
nvlist_t *pools;
nvpair_t *elem;
char *name;
uint64_t this_guid;
boolean_t active;
verify(iarg->poolname == NULL || iarg->guid == 0);
if ((fd = open(iarg->cachefile, O_RDONLY | O_CLOEXEC)) < 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to open cache file"));
return (NULL);
}
if (fstat64(fd, &statbuf) != 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) close(fd);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
return (NULL);
}
if ((buf = zutil_alloc(hdl, statbuf.st_size)) == NULL) {
(void) close(fd);
return (NULL);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) close(fd);
free(buf);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"failed to read cache file contents"));
return (NULL);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
free(buf);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"invalid or corrupt cache file contents"));
return (NULL);
}
free(buf);
/*
* Go through and get the current state of the pools and refresh their
* state.
*/
if (nvlist_alloc(&pools, 0, 0) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
src = fnvpair_value_nvlist(elem);
name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
if (iarg->poolname != NULL && strcmp(iarg->poolname, name) != 0)
continue;
this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
if (iarg->guid != 0 && iarg->guid != this_guid)
continue;
if (zutil_pool_active(hdl, name, this_guid, &active) != 0) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (active)
continue;
if (iarg->scan) {
uint64_t saved_guid = iarg->guid;
const char *saved_poolname = iarg->poolname;
pthread_mutex_t lock;
/*
* Create the device cache that will hold the
* devices we will scan based on the cachefile.
* This will get destroyed and freed by
* zpool_find_import_impl.
*/
avl_tree_t *cache = zutil_alloc(hdl,
sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare,
sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
nvlist_t *nvroot = fnvlist_lookup_nvlist(src,
ZPOOL_CONFIG_VDEV_TREE);
/*
* We only want to find the pool with this_guid.
* We will reset these values back later.
*/
iarg->guid = this_guid;
iarg->poolname = NULL;
/*
* We need to build up a cache of devices that exist
* in the paths pointed to by the cachefile. This allows
* us to preserve the device namespace that was
* originally specified by the user but also lets us
* scan devices in those directories in case they had
* been renamed.
*/
pthread_mutex_init(&lock, NULL);
discover_cached_paths(hdl, nvroot, cache, &lock);
nvlist_t *nv = zpool_find_import_impl(hdl, iarg,
&lock, cache);
pthread_mutex_destroy(&lock);
/*
* zpool_find_import_impl will return back
* a list of pools that it found based on the
* device cache. There should only be one pool
* since we're looking for a specific guid.
* We will use that pool to build up the final
* pool nvlist which is returned back to the
* caller.
*/
nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
+ if (pair == NULL)
+ continue;
fnvlist_add_nvlist(pools, nvpair_name(pair),
fnvpair_value_nvlist(pair));
VERIFY3P(nvlist_next_nvpair(nv, pair), ==, NULL);
iarg->guid = saved_guid;
iarg->poolname = saved_poolname;
continue;
}
if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
iarg->cachefile) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
update_vdevs_config_dev_sysfs_path(src);
if ((dst = zutil_refresh_config(hdl, src)) == NULL) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(dst);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
nvlist_free(dst);
}
nvlist_free(raw);
return (pools);
}
static nvlist_t *
zpool_find_import(libpc_handle_t *hdl, importargs_t *iarg)
{
pthread_mutex_t lock;
avl_tree_t *cache;
nvlist_t *pools = NULL;
verify(iarg->poolname == NULL || iarg->guid == 0);
pthread_mutex_init(&lock, NULL);
/*
* Locate pool member vdevs by blkid or by directory scanning.
* On success a newly allocated AVL tree which is populated with an
* entry for each discovered vdev will be returned in the cache.
* It's the caller's responsibility to consume and destroy this tree.
*/
if (iarg->scan || iarg->paths != 0) {
size_t dirs = iarg->paths;
const char * const *dir = (const char * const *)iarg->path;
if (dirs == 0)
dir = zpool_default_search_paths(&dirs);
if (zpool_find_import_scan(hdl, &lock, &cache,
dir, dirs) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
} else {
if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
}
pools = zpool_find_import_impl(hdl, iarg, &lock, cache);
pthread_mutex_destroy(&lock);
return (pools);
}
nvlist_t *
zpool_search_import(void *hdl, importargs_t *import,
const pool_config_ops_t *pco)
{
libpc_handle_t handle = { 0 };
nvlist_t *pools = NULL;
handle.lpc_lib_handle = hdl;
handle.lpc_ops = pco;
handle.lpc_printerr = B_TRUE;
verify(import->poolname == NULL || import->guid == 0);
if (import->cachefile != NULL)
pools = zpool_find_import_cached(&handle, import);
else
pools = zpool_find_import(&handle, import);
if ((pools == NULL || nvlist_empty(pools)) &&
handle.lpc_open_access_error && geteuid() != 0) {
(void) zutil_error(&handle, EZFS_EACESS, dgettext(TEXT_DOMAIN,
"no pools found"));
}
return (pools);
}
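/*
 * Illustrative sketch, not part of this change: one way a consumer might
 * enumerate importable pools with zpool_search_import().  It assumes the
 * libzfs-provided config ops (libzfs_config_ops) declared in libzutil.h;
 * the function name and cache file path below are for illustration only.
 */
#include <stdio.h>
#include <libnvpair.h>
#include <libzutil.h>

static void
print_importable_pools(void *hdl)
{
	importargs_t args = { 0 };
	nvlist_t *pools;
	nvpair_t *elem = NULL;

	/* Read pool configs from the default cache file location. */
	args.cachefile = "/etc/zfs/zpool.cache";
	pools = zpool_search_import(hdl, &args, &libzfs_config_ops);
	if (pools == NULL)
		return;
	/* Each nvpair is named after an importable pool. */
	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
		(void) printf("%s\n", nvpair_name(elem));
	fnvlist_free(pools);
}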
static boolean_t
pool_match(nvlist_t *cfg, char *tgt)
{
uint64_t v, guid = strtoull(tgt, NULL, 0);
char *s;
if (guid != 0) {
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
return (v == guid);
} else {
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
return (strcmp(s, tgt) == 0);
}
return (B_FALSE);
}
int
zpool_find_config(void *hdl, const char *target, nvlist_t **configp,
importargs_t *args, const pool_config_ops_t *pco)
{
nvlist_t *pools;
nvlist_t *match = NULL;
nvlist_t *config = NULL;
char *sepp = NULL;
int count = 0;
char *targetdup = strdup(target);
*configp = NULL;
if ((sepp = strpbrk(targetdup, "/@")) != NULL)
*sepp = '\0';
pools = zpool_search_import(hdl, args, pco);
if (pools != NULL) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
VERIFY0(nvpair_value_nvlist(elem, &config));
if (pool_match(config, targetdup)) {
count++;
if (match != NULL) {
/* multiple matches found */
continue;
} else {
match = fnvlist_dup(config);
}
}
}
fnvlist_free(pools);
}
if (count == 0) {
free(targetdup);
return (ENOENT);
}
if (count > 1) {
free(targetdup);
fnvlist_free(match);
return (EINVAL);
}
*configp = match;
free(targetdup);
return (0);
}
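/*
 * Usage note, illustrative and not part of this change: zpool_find_config()
 * truncates the target at the first '/' or '@', so a dataset or snapshot
 * name such as "tank/home@snap" resolves the pool "tank".  It returns 0
 * with *configp set to a duplicated config the caller must free with
 * fnvlist_free(), ENOENT if no discovered pool matches the target, and
 * EINVAL if more than one pool matches.
 */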
/*
* Internal function for iterating over the vdevs.
*
* For each vdev, func() will be called and will be passed 'zhp' (which is
* typically the zpool_handle_t cast as a void pointer), the vdev's nvlist, and
* a user-defined data pointer).
*
* The return values from all the func() calls will be OR'd together and
* returned.
*/
int
for_each_vdev_cb(void *zhp, nvlist_t *nv, pool_vdev_iter_f func,
void *data)
{
nvlist_t **child;
uint_t c, children;
int ret = 0;
int i;
char *type;
const char *list[] = {
ZPOOL_CONFIG_SPARES,
ZPOOL_CONFIG_L2CACHE,
ZPOOL_CONFIG_CHILDREN
};
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (ret);
/* Don't run our function on root or indirect vdevs */
if ((strcmp(type, VDEV_TYPE_ROOT) != 0) &&
(strcmp(type, VDEV_TYPE_INDIRECT) != 0)) {
ret |= func(zhp, nv, data);
}
for (i = 0; i < ARRAY_SIZE(list); i++) {
if (nvlist_lookup_nvlist_array(nv, list[i], &child,
&children) == 0) {
for (c = 0; c < children; c++) {
uint64_t ishole = 0;
(void) nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole);
if (ishole)
continue;
ret |= for_each_vdev_cb(zhp, child[c],
func, data);
}
}
}
return (ret);
}
/*
* Given an ZPOOL_CONFIG_VDEV_TREE nvpair, iterate over all the vdevs, calling
* func() for each one. func() is passed the vdev's nvlist and an optional
* user-defined 'data' pointer.
*/
int
for_each_vdev_in_nvlist(nvlist_t *nvroot, pool_vdev_iter_f func, void *data)
{
return (for_each_vdev_cb(NULL, nvroot, func, data));
}
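/*
 * Illustrative sketch, not part of this change: a minimal callback for
 * for_each_vdev_in_nvlist() that counts the vdevs visited in a pool
 * config's ZPOOL_CONFIG_VDEV_TREE.  Assumes <libzutil.h>, <libnvpair.h>,
 * and <sys/fs/zfs.h> are in scope; names are for illustration only.
 */
static int
count_vdev_cb(void *zhp, nvlist_t *nv, void *data)
{
	(void) zhp;
	(void) nv;
	/*
	 * Invoked once per vdev (including spares and cache devices),
	 * but not for the root or indirect vdevs.
	 */
	(*(int *)data)++;
	return (0);
}

static int
count_vdevs(nvlist_t *config)
{
	nvlist_t *nvroot;
	int count = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0)
		(void) for_each_vdev_in_nvlist(nvroot, count_vdev_cb, &count);
	return (count);
}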
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index cc55ee32ba24..90a8ca788c78 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -1,2546 +1,2579 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd June 1, 2021
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy ULONG_MAX Ns B Pq ulong
Maximum size in bytes of the dbuf cache.
The target size is determined by the MIN versus
.No 1/2^ Ns Sy dbuf_cache_shift Pq 1/32nd
of the target ARC size.
The behavior of the dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_metadata_cache_max_bytes Ns = Ns Sy ULONG_MAX Ns B Pq ulong
Maximum size in bytes of the metadata dbuf cache.
The target size is determined by the MIN versus
.No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
of the target ARC size.
The behavior of the metadata dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_cache_hiwater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage over
.Sy dbuf_cache_max_bytes
when dbufs must be evicted directly.
.
.It Sy dbuf_cache_lowater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage below
.Sy dbuf_cache_max_bytes
when the evict thread stops evicting dbufs.
.
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq int
Set the size of the dbuf cache
.Pq Sy dbuf_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq int
Set the size of the dbuf metadata cache
.Pq Sy dbuf_metadata_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq int
dnode slots allocated in a single operation as a power of 2.
The default value minimizes lock contention for the bulk operation performed.
.
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq int
Limit the amount we can prefetch with one call to this amount in bytes.
This helps to limit the amount of memory that can be used by prefetching.
.
.It Sy ignore_hole_birth Pq int
Alias for
.Sy send_holes_without_birth_time .
.
.It Sy l2arc_feed_again Ns = Ns Sy 1 Ns | Ns 0 Pq int
Turbo L2ARC warm-up.
When the L2ARC is cold the fill interval will be set as fast as possible.
.
.It Sy l2arc_feed_min_ms Ns = Ns Sy 200 Pq ulong
Min feed interval in milliseconds.
Requires
.Sy l2arc_feed_again Ns = Ns Ar 1
and only applicable in related situations.
.
.It Sy l2arc_feed_secs Ns = Ns Sy 1 Pq ulong
Seconds between L2ARC writing.
.
.It Sy l2arc_headroom Ns = Ns Sy 2 Pq ulong
How far through the ARC lists to search for L2ARC cacheable content,
expressed as a multiplier of
.Sy l2arc_write_max .
ARC persistence across reboots can be achieved with persistent L2ARC
by setting this parameter to
.Sy 0 ,
allowing the full length of ARC lists to be searched for cacheable content.
.
.It Sy l2arc_headroom_boost Ns = Ns Sy 200 Ns % Pq ulong
Scales
.Sy l2arc_headroom
by this percentage when L2ARC contents are being successfully compressed
before writing.
A value of
.Sy 100
disables this feature.
.
.It Sy l2arc_exclude_special Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether buffers present on special vdevs are eligible for caching
into L2ARC.
If set to 1, exclude dbufs on special vdevs from being cached to L2ARC.
.
.It Sy l2arc_mfuonly Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether only MFU metadata and data are cached from ARC into L2ARC.
This may be desired to avoid wasting space on L2ARC when reading/writing large
amounts of data that are not expected to be accessed more than once.
.Pp
The default is off,
meaning both MRU and MFU data and metadata are cached.
When turning off this feature, some MRU buffers will still be present
in ARC and eventually cached on L2ARC.
.No If Sy l2arc_noprefetch Ns = Ns Sy 0 ,
some prefetched buffers will be cached to L2ARC, and those might later
transition to MRU, in which case the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
Regardless of
.Sy l2arc_noprefetch ,
some MFU buffers might be evicted from ARC,
accessed later on as prefetches and transition to MRU as prefetches.
If accessed again they are counted as MRU and the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
The ARC status of L2ARC buffers when they were first cached in
L2ARC can be seen in the
.Sy l2arc_mru_asize , Sy l2arc_mfu_asize , No and Sy l2arc_prefetch_asize
arcstats when importing the pool or onlining a cache
device if persistent L2ARC is enabled.
.Pp
The
.Sy evict_l2_eligible_mru
arcstat does not take into account if this option is enabled as the information
provided by the
.Sy evict_l2_eligible_m[rf]u
arcstats can be used to decide if toggling this option is appropriate
for the current workload.
.
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq int
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure,
too many headers on a system with an irrationally large L2ARC
can render it slow or unusable.
This parameter limits L2ARC writes and rebuilds to achieve the target.
.
.It Sy l2arc_trim_ahead Ns = Ns Sy 0 Ns % Pq ulong
Trims ahead of the current write size
.Pq Sy l2arc_write_max
on L2ARC devices by this percentage of write size if we have filled the device.
If set to
.Sy 100
we TRIM twice the space required to accommodate upcoming writes.
A minimum of
.Sy 64 MiB
will be trimmed.
It also enables TRIM of the whole L2ARC device upon creation
or addition to an existing pool or if the header of the device is
invalid upon importing a pool or onlining a cache device.
A value of
.Sy 0
disables TRIM on L2ARC altogether and is the default as it can put significant
stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
.
.It Sy l2arc_noprefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Do not write buffers to L2ARC if they were prefetched but not used by
applications.
In case there are prefetched buffers in L2ARC and this option
is later set, we do not read the prefetched buffers from L2ARC.
Unsetting this option is useful for caching sequential reads from the
disks to L2ARC and serving those reads from L2ARC later on.
This may be beneficial in case the L2ARC device is significantly faster
in sequential reads than the disks of the pool.
.Pp
Use
.Sy 1
to disable and
.Sy 0
to enable caching/reading prefetches to/from L2ARC.
.
.It Sy l2arc_norw Ns = Ns Sy 0 Ns | Ns 1 Pq int
No reads during writes.
.
.It Sy l2arc_write_boost Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq ulong
Cold L2ARC devices will have
.Sy l2arc_write_max
increased by this amount while they remain cold.
.
.It Sy l2arc_write_max Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq ulong
Max write bytes per interval.
.
.It Sy l2arc_rebuild_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Rebuild the L2ARC when importing a pool (persistent L2ARC).
This can be disabled if there are problems importing a pool
or attaching an L2ARC device (e.g. the L2ARC device is slow
in reading stored log metadata, or the metadata
has become somehow fragmented/unusable).
.
.It Sy l2arc_rebuild_blocks_min_l2size Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq ulong
Minimum size of an L2ARC device required in order to write log blocks in it.
The log blocks are used upon importing the pool to rebuild the persistent L2ARC.
.Pp
For L2ARC devices less than 1 GiB, the amount of data
.Fn l2arc_evict
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq ulong
Metaslab granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data to each disk
before moving on to the next top-level vdev.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq ulong
Make some blocks above a certain size be gang blocks.
This option is used by the test suite to facilitate testing.
.
.It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
When attempting to log an output nvlist of an ioctl in the on-disk history,
the output will not be stored if it is larger than this size (in bytes).
This must be less than
.Sy DMU_MAX_ACCESS Pq 64 MiB .
This applies primarily to
.Fn zfs_ioc_channel_program Pq cf. Xr zfs-program 8 .
.
.It Sy zfs_keep_log_spacemaps_at_export Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent log spacemaps from being destroyed during pool exports and destroys.
.
.It Sy zfs_metaslab_segment_weight_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable segment-based metaslab selection.
.
.It Sy zfs_metaslab_switch_threshold Ns = Ns Sy 2 Pq int
When using segment-based metaslab selection, continue allocating
from the active metaslab until this option's
worth of buckets have been exhausted.
.
.It Sy metaslab_debug_load Ns = Ns Sy 0 Ns | Ns 1 Pq int
Load all metaslabs during pool import.
.
.It Sy metaslab_debug_unload Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent metaslabs from being unloaded.
.
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable use of the fragmentation metric in computing metaslab weights.
.
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum distance to search forward from the last offset.
Without this limit, fragmented pools can see
.Em >100`000
iterations and
.Fn metaslab_block_picker
becomes the performance limiting factor on high-performance storage.
.Pp
With the default setting of
.Sy 16 MiB ,
we typically see less than
.Em 500
iterations, even with very fragmented
.Sy ashift Ns = Ns Sy 9
pools.
The maximum number of iterations possible is
.Sy metaslab_df_max_search / 2^(ashift+1) .
With the default setting of
.Sy 16 MiB
this is
.Em 16*1024 Pq with Sy ashift Ns = Ns Sy 9
or
.Em 2*1024 Pq with Sy ashift Ns = Ns Sy 12 .
.
.It Sy metaslab_df_use_largest_segment Ns = Ns Sy 0 Ns | Ns 1 Pq int
If not searching forward (due to
.Sy metaslab_df_max_search , metaslab_df_free_pct ,
.No or Sy metaslab_df_alloc_threshold ) ,
this tunable controls which segment is used.
If set, we will use the largest free segment.
If unset, we will use a segment of at least the requested size.
.
.It Sy zfs_metaslab_max_size_cache_sec Ns = Ns Sy 3600 Ns s Po 1 hour Pc Pq ulong
When we unload a metaslab, we cache the size of the largest free chunk.
We use that cached size to determine whether or not to load a metaslab
for a given allocation.
As more frees accumulate in that metaslab while it's unloaded,
the cached max size becomes less and less accurate.
After a number of seconds controlled by this tunable,
we stop considering the cached max size and start
considering only the histogram instead.
.
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq int
When we are loading a new metaslab, we check the amount of memory being used
to store metaslab range trees.
If it is over a threshold, we attempt to unload the least recently used metaslab
to prevent the system from clogging all of its memory with range trees.
This tunable sets the percentage of total system memory that is the threshold.
.
.It Sy zfs_metaslab_try_hard_before_gang Ns = Ns Sy 0 Ns | Ns 1 Pq int
.Bl -item -compact
.It
If unset, we will first try normal allocation.
.It
If that fails then we will do a gang allocation.
.It
If that fails then we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.Pp
.Bl -item -compact
.It
If set, we will first try normal allocation.
.It
If that fails then we will do a "try hard" allocation.
.It
If that fails we will do a gang allocation.
.It
If that fails we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq int
When not trying hard, we only consider this number of the best metaslabs.
This improves performance, especially when there are many metaslabs per vdev
and the allocation can't actually be satisfied
(so we would otherwise iterate all metaslabs).
.
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq int
When a vdev is added, target this number of metaslabs per top-level vdev.
.
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq int
Default limit for metaslab size.
.
-.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy ASHIFT_MAX Po 16 Pc Pq ulong
+.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq ulong
Maximum ashift used when optimizing for logical \[->] physical sector size on new
top-level vdevs.
+May be increased up to
+.Sy ASHIFT_MAX Po 16 Pc ,
+but this may negatively impact pool space efficiency.
.
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq ulong
Minimum ashift used when creating new top-level vdevs.
.
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq int
Minimum number of metaslabs to create in a top-level vdev.
.
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
Skip label validation steps during pool import.
Changing is not recommended unless you know what you're doing
and are recovering a damaged label.
.
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq int
Practical upper limit of total metaslabs per top-level vdev.
.
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group preloading.
.
.It Sy metaslab_lba_weighting_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Give more weight to metaslabs with lower LBAs,
assuming they have greater bandwidth,
as is typically the case on a modern constant angular velocity disk drive.
.
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq int
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
reduce unnecessary reloading.
Note that both this many TXGs and
.Sy metaslab_unload_delay_ms
milliseconds must pass before unloading will occur.
.
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq int
After a metaslab is used, we keep it loaded for this many milliseconds,
to attempt to reduce unnecessary reloading.
Note that both this many milliseconds and
.Sy metaslab_unload_delay
TXGs must pass before unloading will occur.
.
.It Sy reference_history Ns = Ns Sy 3 Pq int
Maximum reference holders being tracked when reference_tracking_enable is active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
.Sy refcount_t
objects (debug builds only).
.
.It Sy send_holes_without_birth_time Ns = Ns Sy 1 Ns | Ns 0 Pq int
When set, the
.Sy hole_birth
optimization will not be used, and all holes will always be sent during a
.Nm zfs Cm send .
This is useful if you suspect your datasets are affected by a bug in
.Sy hole_birth .
.
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
SPA config file.
.
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq int
Multiplication factor used to estimate actual disk consumption from the
size of data being written.
The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its configuration.
Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor,
particularly if they operate close to quota or capacity limits.
.
.It Sy spa_load_print_vdev_tree Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to print the vdev tree in the debugging message buffer during pool import.
.
.It Sy spa_load_verify_data Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse data blocks during an "extreme rewind"
.Pq Fl X
import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal skips non-metadata blocks.
It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.
.It Sy spa_load_verify_metadata Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse blocks during an "extreme rewind"
.Pq Fl X
pool import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal is not performed.
It can be toggled once the import has started to stop or start the traversal.
.
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq int
Sets the maximum number of bytes to consume during pool import to the log2
fraction of the target ARC size.
.
.It Sy spa_slop_shift Ns = Ns Sy 5 Po 1/32nd Pc Pq int
Normally, we don't allow the last
.Sy 3.2% Pq Sy 1/2^spa_slop_shift
of space in the pool to be consumed.
This ensures that we don't run the pool completely out of space,
due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space.
If we have less than this amount of free space,
most ZPL operations (e.g. write, create) will return
.Sy ENOSPC .
.
.It Sy spa_upgrade_errlog_limit Ns = Ns Sy 0 Pq uint
Limits the number of on-disk error log entries that will be converted to the
new format when enabling the
.Sy head_errlog
feature.
The default is to convert all log entries.
.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq int
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space, in bytes,
which will be included as "unnecessary" data in a chunk of copied data.
.Pp
The default value here was chosen to align with
.Sy zfs_vdev_read_gap_limit ,
which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.
.It Sy vdev_file_logical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq ulong
Logical ashift for file-based devices.
.
.It Sy vdev_file_physical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq ulong
Physical ashift for file-based devices.
.
.It Sy zap_iterate_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, when we start iterating over a ZAP object,
prefetch the entire object (all leaf blocks).
However, this is limited by
.Sy dmu_prefetch_max .
.
.It Sy zfetch_array_rd_sz Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq ulong
If prefetching is enabled, disable prefetching for reads larger than this size.
.
.It Sy zfetch_min_distance Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Min bytes to prefetch per stream.
Prefetch distance starts from the demand access size and quickly grows to
this value, doubling on each hit.
After that it may grow further by 1/8 per hit, but only if some prefetches
issued since the last time have not completed in time to satisfy the demand
request, i.e. the prefetch depth did not cover the read latency or the pool
got saturated.
.
.It Sy zfetch_max_distance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch per stream.
.
.It Sy zfetch_max_idistance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch indirects for per stream.
.
.It Sy zfetch_max_streams Ns = Ns Sy 8 Pq uint
Max number of streams per zfetch (prefetch streams per file).
.
.It Sy zfetch_min_sec_reap Ns = Ns Sy 1 Pq uint
Minimum time before an inactive prefetch stream can be reclaimed.
.
.It Sy zfetch_max_sec_reap Ns = Ns Sy 2 Pq uint
Maximum time before an inactive prefetch stream can be deleted.
.
.It Sy zfs_abd_scatter_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable the use of scatter/gather lists for ARC data buffers.
When unset, all allocations are forced to be linear in kernel memory.
Disabling can improve performance in some code paths
at the expense of fragmented kernel memory.
.
.It Sy zfs_abd_scatter_max_order Ns = Ns Sy MAX_ORDER\-1 Pq uint
Maximum number of consecutive memory pages allocated in a single block for
scatter/gather lists.
.Pp
The value of
.Sy MAX_ORDER
depends on kernel configuration.
.
.It Sy zfs_abd_scatter_min_size Ns = Ns Sy 1536 Ns B Po 1.5 KiB Pc Pq uint
This is the minimum allocation size that will use scatter (page-based) ABDs.
Smaller allocations will use linear ABDs.
.
.It Sy zfs_arc_dnode_limit Ns = Ns Sy 0 Ns B Pq ulong
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata.
This value acts as a ceiling to the amount of dnode metadata, and defaults to
.Sy 0 ,
which indicates that a percentage of the ARC meta buffers,
determined by
.Sy zfs_arc_dnode_limit_percent ,
may be used for dnodes.
.Pp
Also see
.Sy zfs_arc_meta_prune
which serves a similar purpose but is used
when the amount of metadata in the ARC exceeds
.Sy zfs_arc_meta_limit
rather than in response to overall demand for non-metadata.
.
.It Sy zfs_arc_dnode_limit_percent Ns = Ns Sy 10 Ns % Pq ulong
Percentage that can be consumed by dnodes of ARC meta buffers.
.Pp
See also
.Sy zfs_arc_dnode_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_dnode_reduce_percent Ns = Ns Sy 10 Ns % Pq ulong
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds
.Sy zfs_arc_dnode_limit .
.
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq int
The ARC's buffer hash table is sized based on the assumption of an average
block size of this value.
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
with 8-byte pointers.
For configurations with a known larger average block size,
this value can be increased to reduce the memory footprint.
.
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq int
When
.Fn arc_is_overflowing ,
.Fn arc_get_data_impl
waits for this percent of the requested amount of data to be evicted.
For example, by default, for every
.Em 2 KiB
that's evicted,
.Em 1 KiB
of it may be "reused" by a new allocation.
Since this is above
.Sy 100 Ns % ,
it ensures that progress is made towards getting
.Sy arc_size No under Sy arc_c .
Since this is finite, it ensures that allocations can still happen,
even during the potentially long time that
.Sy arc_size No is more than Sy arc_c .
.
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq int
Number of ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq int
If set to a non-zero value, it will replace the
.Sy arc_grow_retry
value with this value.
The
.Sy arc_grow_retry
.No value Pq default Sy 5 Ns s
is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.
.It Sy zfs_arc_lotsfree_percent Ns = Ns Sy 10 Ns % Pq int
Throttle I/O when free system memory drops below this percentage of total
system memory.
Setting this value to
.Sy 0
will disable the throttle.
.
.It Sy zfs_arc_max Ns = Ns Sy 0 Ns B Pq ulong
Max size of ARC in bytes.
If
.Sy 0 ,
then the max size of ARC is determined by the amount of system memory installed.
Under Linux, half of system memory will be used as the limit.
Under
.Fx ,
the larger of
.Sy all_system_memory No \- Sy 1 GiB
and
.Sy 5/8 No \(mu Sy all_system_memory
will be used as the limit.
This value must be at least
.Sy 67108864 Ns B Pq 64 MiB .
.Pp
This value can be changed dynamically, with some caveats.
It cannot be set back to
.Sy 0
while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.
.It Sy zfs_arc_meta_adjust_restarts Ns = Ns Sy 4096 Pq ulong
The number of restart passes to make while scanning the ARC, attempting
to free buffers in order to stay below the
.Sy zfs_arc_meta_limit .
This value should not need to be tuned but is available to facilitate
performance analysis.
.
.It Sy zfs_arc_meta_limit Ns = Ns Sy 0 Ns B Pq ulong
The maximum allowed size in bytes that metadata buffers are allowed to
consume in the ARC.
When this limit is reached, metadata buffers will be reclaimed,
even if the overall
.Sy arc_c_max
has not been reached.
It defaults to
.Sy 0 ,
which indicates that a percentage based on
.Sy zfs_arc_meta_limit_percent
of the ARC may be used for metadata.
.Pp
This value may be changed dynamically, except that it must be set to an explicit value
.Pq cannot be set back to Sy 0 .
.
.It Sy zfs_arc_meta_limit_percent Ns = Ns Sy 75 Ns % Pq ulong
Percentage of ARC buffers that can be used for metadata.
.Pp
See also
.Sy zfs_arc_meta_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_meta_min Ns = Ns Sy 0 Ns B Pq ulong
The minimum allowed size in bytes that metadata buffers may consume in
the ARC.
.
.It Sy zfs_arc_meta_prune Ns = Ns Sy 10000 Pq int
The number of dentries and inodes to be scanned looking for entries
which can be dropped.
This may be required when the ARC reaches the
.Sy zfs_arc_meta_limit
because dentries and inodes can pin buffers in the ARC.
Increasing this value will cause the dentry and inode caches
to be pruned more aggressively.
Setting this value to
.Sy 0
will disable pruning the inode and dentry caches.
.
.It Sy zfs_arc_meta_strategy Ns = Ns Sy 1 Ns | Ns 0 Pq int
Define the strategy for ARC metadata buffer eviction (meta reclaim strategy):
.Bl -tag -compact -offset 4n -width "0 (META_ONLY)"
.It Sy 0 Pq META_ONLY
evict only the ARC metadata buffers
.It Sy 1 Pq BALANCED
additional data buffers may be evicted if required
to evict the required number of metadata buffers.
.El
.
.It Sy zfs_arc_min Ns = Ns Sy 0 Ns B Pq ulong
Min size of ARC in bytes.
.No If set to Sy 0 , arc_c_min
will default to consuming the larger of
.Sy 32 MiB
and
.Sy all_system_memory No / Sy 32 .
.
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq int
Minimum time prefetched blocks are locked in the ARC.
.
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq int
Minimum time "prescient prefetched" blocks are locked in the ARC.
These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them.
.
.It Sy zfs_arc_prune_task_threads Ns = Ns Sy 1 Pq int
Number of arc_prune threads.
.Fx
does not need more than one.
Linux may theoretically use one per mount point up to the number of CPUs,
but that was not proven to be useful.
.
.It Sy zfs_max_missing_tvds Ns = Ns Sy 0 Pq int
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.
.It Sy zfs_max_nvlist_src_size Ns = Sy 0 Pq ulong
Maximum size in bytes allowed to be passed as
.Sy zc_nvlist_src_size
for ioctls on
.Pa /dev/zfs .
This prevents a user from causing the kernel to allocate
an excessive amount of memory.
When the limit is exceeded, the ioctl fails with
.Sy EINVAL
and a description of the error is sent to the
.Pa zfs-dbgmsg
log.
This parameter should not need to be touched under normal circumstances.
If
.Sy 0 ,
equivalent to a quarter of the user-wired memory limit under
.Fx
and to
.Sy 134217728 Ns B Pq 128 MiB
under Linux.
.
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq int
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and metadata objects.
Locking is performed at the level of these "sub-lists".
This parameters controls the number of sub-lists per ARC state,
and also applies to other uses of the multilist data structure.
.Pp
If
.Sy 0 ,
equivalent to the greater of the number of online CPUs and
.Sy 4 .
.
.It Sy zfs_arc_overflow_shift Ns = Ns Sy 8 Pq int
The ARC size is considered to be overflowing if it exceeds the current
ARC target size
.Pq Sy arc_c
by thresholds determined by this parameter.
Exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No / Sy 2
starts ARC reclamation process.
If that appears insufficient, exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No \(mu Sy 1.5
blocks new buffer allocation until the reclaim thread catches up.
Started reclamation process continues till ARC size returns below the
target size.
.Pp
The default value of
.Sy 8
causes the ARC to start reclamation if it exceeds the target size by
.Em 0.2%
of the target size, and block allocations by
.Em 0.6% .
.
.It Sy zfs_arc_p_min_shift Ns = Ns Sy 0 Pq int
If nonzero, this will update
.Sy arc_p_min_shift Pq default Sy 4
with the new value.
.Sy arc_p_min_shift No is used as a shift of Sy arc_c
when calculating the minimum
.Sy arc_p No size.
.
.It Sy zfs_arc_p_dampener_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable
.Sy arc_p
adapt dampener, which reduces the maximum single adjustment to
.Sy arc_p .
.
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq int
If nonzero, this will update
.Sy arc_shrink_shift Pq default Sy 7
with the new value.
.
.It Sy zfs_arc_pc_percent Ns = Ns Sy 0 Ns % Po off Pc Pq uint
Percent of pagecache to reclaim ARC to.
.Pp
This tunable allows the ZFS ARC to play more nicely
with the kernel's LRU pagecache.
It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
.Sy zfs_arc_min
if necessary.
This value is specified as percent of pagecache size (as measured by
.Sy NR_FILE_PAGES ) ,
where that percent may exceed
.Sy 100 .
This
only operates during memory pressure/reclaim.
.
.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 10000 Pq int
This is a limit on how many pages the ARC shrinker makes available for
eviction in response to one page allocation attempt.
Note that in practice, the kernel's shrinker can ask us to evict
up to about four times this for one allocation attempt.
.Pp
The default limit of
.Sy 10000 Pq in practice, Em 160 MiB No per allocation attempt with 4 KiB pages
limits the amount of time spent attempting to reclaim ARC memory to
less than 100 ms per allocation attempt,
even with a small average compressed block size of ~8 KiB.
.Pp
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq ulong
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable pool import at module load by ignoring the cache file
.Pq Sy spa_config_path .
.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
(currently 10 checksums over 10 seconds)
or else the daemon may not trigger any action.
.
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq int
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage.
The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.
.It Sy zfs_condense_indirect_commit_entry_delay_ms Ns = Ns Sy 0 Ns ms Pq int
Vdev indirection layer (used for device removal) sleeps for this many
milliseconds during mapping generation.
Intended for use with the test suite to throttle vdev removal speed.
.
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq int
Minimum percent of obsolete bytes in vdev mapping required to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
Intended for use with the test suite
to facilitate triggering condensing as needed.
.
.It Sy zfs_condense_indirect_vdevs_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable condensing indirect vdev mappings.
When set, attempt to condense indirect vdev mappings
if the mapping uses more than
.Sy zfs_condense_min_mapping_bytes
bytes of memory and if the obsolete space map object uses more than
.Sy zfs_condense_max_obsolete_bytes
bytes on-disk.
The condensing process is an attempt to save memory by removing obsolete mappings.
.
.It Sy zfs_condense_max_obsolete_bytes Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq ulong
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_condense_min_mapping_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq ulong
Minimum size vdev mapping to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_dbgmsg_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Internally ZFS keeps a small log to facilitate debugging.
The log is enabled by default, and can be disabled by unsetting this option.
The contents of the log can be accessed by reading
.Pa /proc/spl/kstat/zfs/dbgmsg .
Writing
.Sy 0
to the file clears the log.
.Pp
This setting does not influence debug prints due to
.Sy zfs_flags .
.
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq int
Maximum size of the internal ZFS debug log.
.
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
Historically used for controlling what reporting was available under
.Pa /proc/spl/kstat/zfs .
No effect.
.
.It Sy zfs_deadman_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
When a pool sync operation takes longer than
.Sy zfs_deadman_synctime_ms ,
or when an individual I/O operation takes longer than
.Sy zfs_deadman_ziotime_ms ,
then the operation is considered to be "hung".
If
.Sy zfs_deadman_enabled
is set, then the deadman behavior is invoked as described by
.Sy zfs_deadman_failmode .
By default, the deadman is enabled and set to
.Sy wait
which results in "hung" I/O operations only being logged.
The deadman is automatically disabled when a pool gets suspended.
.
.It Sy zfs_deadman_failmode Ns = Ns Sy wait Pq charp
Controls the failure behavior when the deadman detects a "hung" I/O operation.
Valid values are:
.Bl -tag -compact -offset 4n -width "continue"
.It Sy wait
Wait for a "hung" operation to complete.
For each "hung" operation a "deadman" event will be posted
describing that operation.
.It Sy continue
Attempt to recover from a "hung" operation by re-dispatching it
to the I/O pipeline if possible.
.It Sy panic
Panic the system.
This can be used to facilitate automatic fail-over
to a properly configured fail-over partner.
.El
.
.It Sy zfs_deadman_checktime_ms Ns = Ns Sy 60000 Ns ms Po 1 min Pc Pq int
Check time in milliseconds.
This defines the frequency at which we check for hung I/O requests
and potentially invoke the
.Sy zfs_deadman_failmode
behavior.
.
.It Sy zfs_deadman_synctime_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq ulong
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the pool sync completes.
.
.It Sy zfs_deadman_ziotime_ms Ns = Ns Sy 300000 Ns ms Po 5 min Pc Pq ulong
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung".
As long as the operation remains "hung",
the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the operation completes.
.
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable prefetching dedup-ed blocks which are going to be freed.
.
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq int
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of
.Sy zfs_dirty_data_max .
This value should be at least
.Sy zfs_vdev_async_write_active_max_dirty_percent .
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_delay_scale Ns = Ns Sy 500000 Pq int
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.Pp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second.
This will smoothly handle between ten times and a tenth of this number.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
.Sy zfs_delay_scale No \(mu Sy zfs_dirty_data_max Em must No be smaller than Sy 2^64 .
.
.It Sy zfs_disable_ivset_guid_check Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disables requirement for IVset GUIDs to be present and match when doing a raw
receive of encrypted datasets.
Intended for users whose pools were created with
OpenZFS pre-release versions and now have compatibility issues.
.
.It Sy zfs_key_max_salt_uses Ns = Ns Sy 400000000 Po 4*10^8 Pc Pq ulong
Maximum number of uses of a single salt value before generating a new one for
encrypted datasets.
The default value is also the maximum.
.
.It Sy zfs_object_mutex_size Ns = Ns Sy 64 Pq uint
Size of the znode hashtable used for holds.
.Pp
Due to the need to hold locks on objects that may not exist yet, kernel mutexes
are not created per-object and instead a hashtable is used where collisions
will result in objects waiting when there is not actually contention on the
same object.
.
.It Sy zfs_slow_io_events_per_second Ns = Ns Sy 20 Ns /s Pq int
Rate limit delay and deadman zevents (which report slow I/O operations) to this many per
second.
.
.It Sy zfs_unflushed_max_mem_amt Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq ulong
Upper-bound limit for unflushed metadata changes to be held by the
log spacemap in memory, in bytes.
.
.It Sy zfs_unflushed_max_mem_ppm Ns = Ns Sy 1000 Ns ppm Po 0.1% Pc Pq ulong
Part of overall system memory that ZFS allows to be used
for unflushed metadata changes by the log spacemap, in millionths.
.
.It Sy zfs_unflushed_log_block_max Ns = Ns Sy 131072 Po 128k Pc Pq ulong
Describes the maximum number of log spacemap blocks allowed for each pool.
The default value means that the space in all the log spacemaps
can add up to no more than
.Sy 131072
blocks (which means
.Em 16 GiB
of logical space before compression and ditto blocks,
assuming that blocksize is
.Em 128 KiB ) .
.Pp
This tunable is important because it involves a trade-off between import
time after an unclean export and the frequency of flushing metaslabs.
The higher this number is, the more log blocks we allow when the pool is
active which means that we flush metaslabs less often and thus decrease
the number of I/O operations for spacemap updates per TXG.
At the same time though, that means that in the event of an unclean export,
there will be more log spacemap blocks for us to read, inducing overhead
in the import time of the pool.
The lower the number, the more flushing we do: log blocks become obsolete
faster and are destroyed sooner, which leaves fewer blocks
to be read during import after a crash.
.Pp
Each log spacemap block existing during pool import leads to approximately
one extra logical I/O issued.
This is the reason why this tunable is exposed in terms of blocks rather
than space used.
.
.It Sy zfs_unflushed_log_block_min Ns = Ns Sy 1000 Pq ulong
If the number of metaslabs is small and our incoming rate is high,
we could get into a situation that we are flushing all our metaslabs every TXG.
Thus we always allow at least this many log blocks.
.
.It Sy zfs_unflushed_log_block_pct Ns = Ns Sy 400 Ns % Pq ulong
Tunable used to determine the number of blocks that can be used for
the spacemap log, expressed as a percentage of the total number of
unflushed metaslabs in the pool.
.
.It Sy zfs_unflushed_log_txg_max Ns = Ns Sy 1000 Pq ulong
Tunable limiting maximum time in TXGs any metaslab may remain unflushed.
It effectively limits maximum number of unflushed per-TXG spacemap logs
that need to be read after unclean pool export.
.
.It Sy zfs_unlink_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When enabled, files will not be asynchronously removed from the list of pending
unlinks and the space they consume will be leaked.
Once this option has been disabled and the dataset is remounted,
the pending unlinks will be processed and the freed space returned to the pool.
This option is used by the test suite.
.
.It Sy zfs_delete_blocks Ns = Ns Sy 20480 Pq ulong
This is used to define a large file for the purposes of deletion.
Files containing more than
.Sy zfs_delete_blocks
blocks will be deleted asynchronously, while smaller files are deleted
synchronously.
Decreasing this value will reduce the time spent in an
.Xr unlink 2
system call, at the expense of a longer delay before the freed space is available.
.
.It Sy zfs_dirty_data_max Ns = Pq int
Determines the dirty space limit in bytes.
Once this limit is exceeded, new writes are halted until space frees up.
This parameter takes precedence over
.Sy zfs_dirty_data_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/10 ,
capped at
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_max_max Ns = Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
This parameter takes precedence over
.Sy zfs_dirty_data_max_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/4 .
.
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed as a percentage of physical RAM.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
The parameter
.Sy zfs_dirty_data_max_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq int
Determines the dirty space limit, expressed as a percentage of all memory.
Once this limit is exceeded, new writes are halted until space frees up.
The parameter
.Sy zfs_dirty_data_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Subject to
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq int
Start syncing out a transaction group if there's at least this much dirty data
.Pq as a percentage of Sy zfs_dirty_data_max .
This should be less than
.Sy zfs_vdev_async_write_active_min_dirty_percent .
.
.It Sy zfs_wrlog_data_max Ns = Pq int
The upper limit of write-transaction zil log data size in bytes.
Write operations are throttled when approaching the limit until log data is
cleared out after transaction group sync.
Because of some overhead, it should be set to at least twice the size of
.Sy zfs_dirty_data_max
.No to prevent harming normal write throughput.
It also should be smaller than the size of the slog device if slog is present.
.Pp
Defaults to
.Sy zfs_dirty_data_max*2 .
.
.It Sy zfs_fallocate_reserve_percent Ns = Ns Sy 110 Ns % Pq uint
Since ZFS is a copy-on-write filesystem with snapshots, blocks cannot be
preallocated for a file in order to guarantee that later writes will not
run out of space.
Instead,
.Xr fallocate 2
space preallocation only checks that sufficient space is currently available
in the pool or the user's project quota allocation,
and then creates a sparse file of the requested size.
The requested space is multiplied by
.Sy zfs_fallocate_reserve_percent
to allow additional space for indirect blocks and other internal metadata.
Setting this to
.Sy 0
disables support for
.Xr fallocate 2
and causes it to return
.Sy EOPNOTSUPP .
.
.It Sy zfs_fletcher_4_impl Ns = Ns Sy fastest Pq string
Select a fletcher 4 implementation.
.Pp
Supported selectors are:
.Sy fastest , scalar , sse2 , ssse3 , avx2 , avx512f , avx512bw ,
.No and Sy aarch64_neon .
All except
.Sy fastest No and Sy scalar
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of fletcher 4 are available, the
.Sy fastest
will be chosen using a micro benchmark.
Selecting
.Sy scalar
results in the original CPU-based calculation being used.
Selecting any option other than
.Sy fastest No or Sy scalar
results in vector instructions
from the respective CPU instruction set being used.
.
+.It Sy zfs_blake3_impl Ns = Ns Sy fastest Pq string
+Select a BLAKE3 implementation.
+.Pp
+Supported selectors are:
+.Sy cycle , fastest , generic , sse2 , sse41 , avx2 , avx512 .
+All except
+.Sy cycle , fastest No and Sy generic
+require instruction set extensions to be available,
+and will only appear if ZFS detects that they are present at runtime.
+If multiple implementations of BLAKE3 are available, the
+.Sy fastest
+will be chosen using a micro benchmark.
+You can see the benchmark results by reading this kstat file:
+.Pa /proc/spl/kstat/zfs/chksum_bench .
+.
.It Sy zfs_free_bpobj_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable the processing of the free_bpobj object.
.
.It Sy zfs_async_block_max_blocks Ns = Ns Sy ULONG_MAX Po unlimited Pc Pq ulong
Maximum number of blocks freed in a single TXG.
.
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq ulong
Maximum number of dedup blocks freed in a single TXG.
.
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq int
Maximum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq int
Minimum asynchronous read I/O operation active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq int
When the pool has more than this much dirty data, use
.Sy zfs_vdev_async_write_max_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq int
When the pool has less than this much dirty data, use
.Sy zfs_vdev_async_write_min_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly
interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 30 Pq int
Maximum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq int
Minimum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.Pp
Lower values are associated with better latency on rotational media but poorer
resilver performance.
The default value of
.Sy 2
was chosen as a compromise.
A value of
.Sy 3
has been shown to improve resilver performance further at a cost of
further increasing latency.
.
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq int
Maximum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq int
Minimum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq int
The maximum number of I/O operations active to each device.
Ideally, this will be at least the sum of each queue's
.Sy max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq int
Maximum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq int
Minimum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq int
Maximum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq int
Minimum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq int
Maximum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq int
Minimum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq int
Maximum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq int
Minimum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq int
Maximum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq int
Minimum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq int
Maximum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq int
Minimum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq int
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
the number of concurrently-active I/O operations is limited to
.Sy zfs_*_min_active ,
unless the vdev is "idle".
When there are no interactive I/O operations active (synchronous or otherwise),
and
.Sy zfs_vdev_nia_delay
operations have completed since the last interactive operation,
then the vdev is considered to be "idle",
and the number of concurrently-active non-interactive operations is increased to
.Sy zfs_*_max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq int
Some HDDs tend to prioritize sequential I/O so strongly that concurrent
random I/O latency reaches several seconds.
On some HDDs this happens even if sequential I/O operations
are submitted one at a time, and so setting
.Sy zfs_*_max_active Ns = Sy 1
does not help.
To prevent non-interactive I/O, like scrub,
from monopolizing the device, no more than
.Sy zfs_vdev_nia_credit No operations can be sent
while there are outstanding incomplete interactive operations.
This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq int
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
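.Pp
For example, with
.Sy zfs_vdev_async_write_max_active Ns = Ns Sy 10 ,
the default of
.Sy 1000%
allows up to
.Em 100
queued allocations per top-level vdev.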
.Pp
Also see
.Sy zio_dva_throttle_enabled .
.
.It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int
Time before expiring
.Pa .zfs/snapshot .
.
.It Sy zfs_admin_snapshot Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow the creation, removal, or renaming of entries in the
.Sy .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled, this functionality works both locally and over NFS exports
which have the
.Em no_root_squash
option set.
.
.It Sy zfs_flags Ns = Ns Sy 0 Pq int
Set additional debugging flags.
The following flags may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Symbolic Name Description
_
1 ZFS_DEBUG_DPRINTF Enable dprintf entries in the debug log.
* 2 ZFS_DEBUG_DBUF_VERIFY Enable extra dbuf verifications.
* 4 ZFS_DEBUG_DNODE_VERIFY Enable extra dnode verifications.
8 ZFS_DEBUG_SNAPNAMES Enable snapshot name verification.
16 ZFS_DEBUG_MODIFY Check for illegally modified ARC buffers.
64 ZFS_DEBUG_ZIO_FREE Enable verification of block frees.
128 ZFS_DEBUG_HISTOGRAM_VERIFY Enable extra spacemap histogram verifications.
256 ZFS_DEBUG_METASLAB_VERIFY Verify space accounting on disk matches in-memory \fBrange_trees\fP.
512 ZFS_DEBUG_SET_ERROR Enable \fBSET_ERROR\fP and dprintf entries in the debug log.
1024 ZFS_DEBUG_INDIRECT_REMAP Verify split blocks created by device removal.
2048 ZFS_DEBUG_TRIM Verify TRIM ranges are always within the allocatable range tree.
4096 ZFS_DEBUG_LOG_SPACEMAP Verify that the log summary is consistent with the spacemap log
and enable \fBzfs_dbgmsgs\fP for metaslab loading and flushing.
.TE
.Sy \& * No Requires debug build.
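.Pp
For example, setting
.Sy zfs_flags Ns = Ns Sy 6
.Pq 2 | 4
enables both the extra dbuf and dnode verifications
(debug builds only).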
.
+.It Sy zfs_btree_verify_intensity Ns = Ns Sy 0 Pq uint
+Enables btree verification.
+The following settings are cumulative:
+.TS
+box;
+lbz r l l .
+ Value Description
+_
+ 1 Verify height.
+ 2 Verify pointers from children to parent.
+ 3 Verify element counts.
+ 4 Verify element order. (expensive)
+* 5 Verify unused memory is poisoned. (expensive)
+.TE
+.Sy \& * No Requires debug build.
+.
.It Sy zfs_free_leak_on_eio Ns = Ns Sy 0 Ns | Ns 1 Pq int
If destroy encounters an
.Sy EIO
while reading metadata (e.g. indirect blocks),
space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled",
as it is unable to make forward progress.
While in this stalled state, all remaining space to free
from the error-encountering filesystem is "temporarily leaked".
Set this flag to cause it to ignore the
.Sy EIO ,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.
.Pp
The default "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/O operations fail), and then later recovers.
In this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks.
Note, however, that this case is actually fairly rare.
.Pp
Typically pools either
.Bl -enum -compact -offset 4n -width "1."
.It
fail completely (but perhaps temporarily,
e.g. due to a top-level vdev going offline), or
.It
have localized, permanent errors (e.g. disk returns the wrong data
due to bit flip or firmware bug).
.El
In the former case, this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless.
In the latter, because the error is permanent, the best we can do
is leak the minimum amount of space,
which is what setting this flag will do.
It is therefore reasonable for this flag to normally be set,
but we chose the more conservative approach of not setting it,
so that there is no possibility of
leaking space in the "partial temporary" failure case.
.
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq int
During a
.Nm zfs Cm destroy
operation using the
.Sy async_destroy
feature,
a minimum of this much time will be spent working on freeing blocks per TXG.
.
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq int
Similar to
.Sy zfs_free_min_time_ms ,
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq long
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq ulong
Pattern written to vdev free space by
.Xr zpool-initialize 8 .
.
.It Sy zfs_initialize_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq ulong
Size of writes used by
.Xr zpool-initialize 8 .
This option is used by the test suite.
.
.It Sy zfs_livelist_max_entries Ns = Ns Sy 500000 Po 5*10^5 Pc Pq ulong
The threshold size (in block pointers) at which we create a new sub-livelist.
Larger sublists are more costly from a memory perspective but the fewer
sublists there are, the lower the cost of insertion.
.
.It Sy zfs_livelist_min_percent_shared Ns = Ns Sy 75 Ns % Pq int
If the amount of shared space between a snapshot and its clone drops below
this threshold, the clone turns off the livelist and reverts to the old
deletion method.
This is in place because livelists no longer give us a benefit
once a clone has been overwritten enough.
.
.It Sy zfs_livelist_condense_new_alloc Ns = Ns Sy 0 Pq int
Incremented each time an extra ALLOC blkptr is added to a livelist entry while
it is being condensed.
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_sync .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the synctask \(em
.Fn spa_livelist_condense_sync .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_livelist_condense_zthr_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_zthr_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the open context condensing work in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_lua_max_instrlimit Ns = Ns Sy 100000000 Po 10^8 Pc Pq ulong
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.
.It Sy zfs_lua_max_memlimit Ns = Ns Sy 104857600 Po 100 MiB Pc Pq ulong
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.
.It Sy zfs_max_dataset_nesting Ns = Ns Sy 50 Pq int
The maximum depth of nested datasets.
This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.
.It Sy zfs_max_log_walking Ns = Ns Sy 5 Pq ulong
The number of past TXGs that the flushing algorithm of the log spacemap
feature uses to estimate incoming log blocks.
.
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq ulong
Maximum number of rows allowed in the summary of the spacemap log.
.
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq int
We currently support block sizes from
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
The benefits of larger blocks, and thus larger I/O,
need to be weighed against the cost of COWing a giant block to modify one byte.
Additionally, very large blocks can have an impact on I/O latency,
and also potentially on the memory allocator.
Therefore, we formerly forbade creating blocks larger than 1 MiB.
Larger blocks could be created by changing this tunable,
and pools with larger blocks can always be imported and used,
regardless of this setting.
.
.It Sy zfs_allow_redacted_dataset_mount Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow datasets received with redacted send/receive to be mounted.
Normally disabled because these datasets may be missing key data.
.
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq ulong
Minimum number of metaslabs to flush per dirty TXG.
.
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq int
Allow metaslabs to keep their active state as long as their fragmentation
percentage is no more than this value.
An active metaslab that exceeds this threshold
will no longer keep its active status, allowing better metaslabs to be selected.
.
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq int
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value.
If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq int
Defines a threshold at which metaslab groups should be eligible for allocations.
The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold.
Once all groups have reached the threshold, all groups are allowed to accept
allocations.
The default value of
.Sy 0
disables the feature and causes all metaslab groups to be eligible for allocations.
.Pp
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old
.Sy zfs_mg_alloc_failures
facility.
.
.It Sy zfs_ddt_data_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place DDT data into the special allocation class.
.
.It Sy zfs_user_indirect_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place user data indirect blocks
into the special allocation class.
.
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq int
Historical statistics for this many latest multihost updates will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
.
.It Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq ulong
Used to control the frequency of multihost writes which are performed when the
.Sy multihost
pool property is on.
This is one of the factors used to determine the
length of the activity check during import.
.Pp
The multihost write period is
.Sy zfs_multihost_interval No / Sy leaf-vdevs .
On average a multihost write will be issued for each leaf vdev
every
.Sy zfs_multihost_interval
milliseconds.
In practice, the observed period can vary with the I/O load
and this observed value is the delay which is stored in the uberblock.
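.Pp
For example, with the default
.Sy 1000 ms
interval and a pool of 8 leaf vdevs,
some leaf vdev receives an MMP write roughly every
.Em 125 ms ,
while each individual leaf vdev is written about once per second.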
.
.It Sy zfs_multihost_import_intervals Ns = Ns Sy 20 Pq uint
Used to control the duration of the activity test on import.
Smaller values of
.Sy zfs_multihost_import_intervals
will reduce the import time but increase
the risk of failing to detect an active pool.
The total activity check time is never allowed to drop below one second.
.Pp
On import the activity check waits a minimum amount of time determined by
.Sy zfs_multihost_interval No \(mu Sy zfs_multihost_import_intervals ,
or the same product computed on the host which last had the pool imported,
whichever is greater.
The activity check time may be further extended if the value of MMP
delay found in the best uberblock indicates actual multihost updates happened
at longer intervals than
.Sy zfs_multihost_interval .
A minimum of
.Em 100 ms
is enforced.
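.Pp
With the default values this amounts to a minimum wait of
.Sy 20 No \(mu Sy 1000 ms No = Em 20 s
before the import can proceed.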
.Pp
.Sy 0 No is equivalent to Sy 1 .
.
.It Sy zfs_multihost_fail_intervals Ns = Ns Sy 10 Pq uint
Controls the behavior of the pool when multihost write failures or delays are
detected.
.Pp
When
.Sy 0 ,
multihost write failures or delays are ignored.
The failures will still be reported to the ZED, which, depending on
its configuration, may take action such as suspending the pool or offlining a
device.
.Pp
Otherwise, the pool will be suspended if
.Sy zfs_multihost_fail_intervals No \(mu Sy zfs_multihost_interval
milliseconds pass without a successful MMP write.
This guarantees the activity test will see MMP writes if the pool is imported.
.Sy 1 No is equivalent to Sy 2 ;
this is necessary to prevent the pool from being suspended
due to normal, small I/O latency variations.
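.Pp
With the default values the pool is therefore suspended after
.Sy 10 No \(mu Sy 1000 ms No = Em 10 s
without a successful MMP write.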
.
.It Sy zfs_no_scrub_io Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable scrub I/O.
This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.
.It Sy zfs_no_scrub_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable block prefetching for scrubs.
.
.It Sy zfs_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable cache flush operations on disks when writing.
Setting this will cause pool corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zfs_nopwrite_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Allow no-operation writes.
The occurrence of nopwrites will further depend on other pool properties
.Pq i.a. the checksumming and compression algorithms .
.
.It Sy zfs_dmu_offset_next_sync Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable forcing TXG sync to find holes.
When enabled, forces ZFS to sync data when
.Sy SEEK_HOLE No or Sy SEEK_DATA
flags are used, allowing holes in a file to be accurately reported.
When disabled, holes will not be reported in recently dirtied files.
.
.It Sy zfs_pd_bytes_max Ns = Ns Sy 52428800 Ns B Po 50 MiB Pc Pq int
The number of bytes which should be prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq int
The number of blocks pointed to by an indirect (non-L0) block which should be
prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_per_txg_dirty_frees_percent Ns = Ns Sy 5 Ns % Pq ulong
Control percentage of dirtied indirect blocks from frees allowed into one TXG.
After this threshold is crossed, additional frees will wait until the next TXG.
.Sy 0 No disables this throttle.
.
.It Sy zfs_prefetch_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable predictive prefetch.
Note that it leaves "prescient" prefetch
.Pq for, e.g., Nm zfs Cm send
intact.
Unlike predictive prefetch, prescient prefetch never issues I/O
that ends up not being needed, so it can't hurt performance.
.
.It Sy zfs_qat_checksum_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for SHA256 checksums.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_compress_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for gzip compression.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_encrypt_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for AES-GCM encryption.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq long
Bytes to read per chunk.
.
.It Sy zfs_read_history Ns = Ns Sy 0 Pq int
Historical statistics for this many latest reads will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
.
.It Sy zfs_read_history_hits Ns = Ns Sy 0 Ns | Ns 1 Pq int
Include cache hits in read history.
.
.It Sy zfs_rebuild_max_segment Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq ulong
Maximum read segment size to issue when sequentially resilvering a
top-level vdev.
.
.It Sy zfs_rebuild_scrub_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub when the last active sequential resilver
completes in order to verify the checksums of all blocks which have been
resilvered.
This is enabled by default and strongly recommended.
.
.It Sy zfs_rebuild_vdev_limit Ns = Ns Sy 33554432 Ns B Po 32 MiB Pc Pq ulong
Maximum amount of I/O that can be concurrently issued for a sequential
resilver per leaf device, given in bytes.
.
.It Sy zfs_reconstruct_indirect_combinations_max Ns = Ns Sy 4096 Pq int
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all.
Instead, try at most this many randomly selected
combinations each time the block is accessed.
This allows all segment copies to participate fairly
in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.
.It Sy zfs_recover Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to attempt to recover from fatal errors.
This should only be used as a last resort,
as it typically results in leaked space, or worse.
.
.It Sy zfs_removal_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore hard I/O errors during device removal.
When set, if a device encounters a hard I/O error during the removal process
the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is hence not recommended.
This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
This is used by the test suite so that it can ensure that certain actions
happen while in the middle of a removal.
.
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
The largest contiguous segment that we will attempt to allocate when removing
a device.
If there is a performance problem with attempting to allocate large blocks,
consider decreasing this.
The default value is also the maximum.
.
.It Sy zfs_resilver_disable_defer Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore the
.Sy resilver_defer
feature, causing an operation that would start a resilver to
immediately restart the one in progress.
.
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq int
Resilvers are processed by the sync thread.
While resilvering, it will spend at least this much time
working on a resilver between TXG flushes.
.
.It Sy zfs_scan_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
If set, remove the DTL (dirty time list) upon completion of a pool scan (scrub),
even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq int
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
working on a scrub between TXG flushes.
.
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hours Pc Pq int
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/O to disk.
The frequency of this flushing is determined by this tunable.
.
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq int
This tunable affects how scrub and resilver I/O segments are ordered.
A higher number indicates that we care more about how filled in a segment is,
while a lower number indicates we care more about the size of the extent without
considering the gaps within a segment.
This value is only tunable upon module insertion.
Changing the value afterwards will have no effect on scrub or resilver performance.
.
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq int
Determines the order that data will be verified while scrubbing or resilvering:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
Data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing
.Pq see Sy zfs_scan_mem_lim_fact .
This may improve scrub performance if the pool's data is very fragmented.
.It Sy 2
The largest mostly-contiguous chunk of found data will be verified first.
By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size.
.It Sy 0
.No Use strategy Sy 1 No during normal verification
.No and strategy Sy 2 No while taking a checkpoint.
.El
.
.It Sy zfs_scan_legacy Ns = Ns Sy 0 Ns | Ns 1 Pq int
If unset, indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O.
Otherwise indicates that the legacy algorithm will be used,
where I/O is initiated as soon as it is discovered.
Unsetting will not affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_max_ext_gap Ns = Ns Sy 2097152 Ns B Po 2 MiB Pc Pq int
Sets the largest gap in bytes between scrub/resilver I/O operations
that will still be considered sequential for sorting purposes.
Changing this value will not
affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq int
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O.
This is done until we get below the soft limit.
.
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq int
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm.
When we cross this limit from below no action is taken.
When we cross this limit from above it is because we are issuing verification I/O.
In this case (unless the metadata scan is done) we stop issuing verification I/O
and start scanning metadata again until we get to the hard limit.
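.Pp
For example, on a system with 64 GiB of RAM and the default factors,
metadata scanning stops once sort memory reaches roughly
.Em 3.2 GiB
.Pq 64 GiB / 20
and resumes once usage falls back below roughly
.Em 160 MiB
.Pq 3.2 GiB / 20 .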
.
.It Sy zfs_scan_strict_mem_lim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enforce tight memory limits on pool scans when a sequential scan is in progress.
When disabled, the memory limit may be exceeded by fast disks.
.
.It Sy zfs_scan_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
Freezes a scrub/resilver in progress without actually pausing it.
Intended for testing/debugging.
.
.It Sy zfs_scan_vdev_limit Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq int
Maximum amount of data that can be concurrently issued at once for scrubs and
resilvers per leaf device, given in bytes.
.
.It Sy zfs_send_corrupt_data Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow sending of corrupt data (ignore read/checksum errors when sending).
.
.It Sy zfs_send_unmodified_spill_blocks Ns = Ns Sy 1 Ns | Ns 0 Pq int
Include unmodified spill blocks in the send stream.
Under certain circumstances, previous versions of ZFS could incorrectly
remove the spill block from an existing object.
Including unmodified copies of the spill blocks creates a backwards-compatible
stream which will recreate a spill block if it was incorrectly removed.
.
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq int
The fill fraction of the
.Nm zfs Cm send
internal queues.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum number of bytes allowed in
.Nm zfs Cm send Ns 's
internal queues.
.
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq int
The fill fraction of the
.Nm zfs Cm send
prefetch queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
The maximum number of bytes allowed that will be prefetched by
.Nm zfs Cm send .
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq int
The fill fraction of the
.Nm zfs Cm receive
queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
The maximum number of bytes allowed in the
.Nm zfs Cm receive
queue.
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum amount of data, in bytes, that
.Nm zfs Cm receive
will write in one DMU transaction.
This is the uncompressed size, even when receiving a compressed send stream.
This setting will not reduce the write size below a single block.
Capped at a maximum of
.Sy 32 MiB .
.
.It Sy zfs_recv_best_effort_corrective Ns = Ns Sy 0 Pq int
When this variable is set to non-zero, a corrective receive:
.Bl -enum -compact -offset 4n -width "1."
.It
Does not enforce the restriction of source & destination snapshot GUIDs
matching.
.It
If there is an error during healing, the healing receive is not
terminated; instead, it moves on to the next record.
.El
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq ulong
Setting this variable overrides the default logic for estimating block
sizes when doing a
.Nm zfs Cm send .
The default heuristic is that the average block size
will be the current recordsize.
Override this value if most data in your dataset is not of that size
and you require accurate zfs send size estimates.
.
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq int
Flushing of data to disk is done in passes.
Defer frees starting in this pass.
.
.It Sy zfs_spa_discard_memory_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum memory used for prefetching a checkpoint's space map on each
vdev while discarding the checkpoint.
.
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq int
Only allow small data blocks to be allocated on the special and dedup vdev
types when the available free space percentage on these vdevs exceeds this value.
This ensures reserved space is available for pool metadata as the
special vdevs approach capacity.
.
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq int
Starting in this sync pass, disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes,
so this has no effect.
.Pp
The original intent was that disabling compression would help the sync passes
to converge.
However, in practice, disabling compression increases
the average number of sync passes, because when we turn compression off,
the size of many blocks changes, and thus we have to re-allocate
(not overwrite) them.
It also increases the number of
.Em 128 KiB
allocations (e.g. for indirect blocks and spacemaps)
because these will not be compressed.
The
.Em 128 KiB
allocations are especially detrimental to performance
on highly fragmented systems, which may have very few free segments of this size,
and may need to load new metaslabs to satisfy these allocations.
.
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq int
Rewrite new block pointers starting in this pass.
.
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
This controls the number of threads used by
.Sy dp_sync_taskq .
The default value of
.Sy 75%
will create a maximum of one thread per CPU.
.
.It Sy zfs_trim_extent_bytes_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Maximum size of TRIM command.
Larger ranges will be split into chunks no larger than this value before issuing.
.
.It Sy zfs_trim_extent_bytes_min Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Minimum size of TRIM commands.
TRIM ranges smaller than this will be skipped,
unless they're part of a larger range which was chunked.
This is done because it's common for these small TRIMs
to negatively impact overall performance.
.
.It Sy zfs_trim_metaslab_skip Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Skip uninitialized metaslabs during the TRIM process.
This option is useful for pools constructed from large thinly-provisioned devices
where TRIM operations are slow.
As a pool ages, an increasing fraction of the pool's metaslabs
will be initialized, progressively degrading the usefulness of this option.
This setting is stored when starting a manual TRIM and will
persist for the duration of the requested TRIM.
.
.It Sy zfs_trim_queue_limit Ns = Ns Sy 10 Pq uint
Maximum number of queued TRIMs outstanding per leaf vdev.
The number of concurrent TRIM commands issued to the device is controlled by
.Sy zfs_vdev_trim_min_active No and Sy zfs_vdev_trim_max_active .
.
.It Sy zfs_trim_txg_batch Ns = Ns Sy 32 Pq uint
The number of transaction groups' worth of frees which should be aggregated
before TRIM operations are issued to the device.
This setting represents a trade-off between issuing larger,
more efficient TRIM operations and the delay
before the recently trimmed space is available for use by the device.
.Pp
Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory usage.
Decreasing this value will have the opposite effect.
The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq int
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq int
Flush dirty data to disk at least every this many seconds (maximum TXG duration).
.
.It Sy zfs_vdev_aggregate_trim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow TRIM I/O operations to be aggregated.
This is normally not helpful because the extents to be trimmed
will already have been aggregated by the metaslab.
This option is provided for debugging and performance analysis.
.
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
Max vdev I/O aggregation size.
.
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
Max vdev I/O aggregation size for non-rotating media.
.
.It Sy zfs_vdev_cache_bshift Ns = Ns Sy 16 Po 64 KiB Pc Pq int
Shift size to inflate reads to.
.
.It Sy zfs_vdev_cache_max Ns = Ns Sy 16384 Ns B Po 16 KiB Pc Pq int
Inflate reads smaller than this value to meet the
.Sy zfs_vdev_cache_bshift
size
.Pq default Sy 64 KiB .
.
.It Sy zfs_vdev_cache_size Ns = Ns Sy 0 Pq int
Total size of the per-disk cache in bytes.
.Pp
Currently this feature is disabled, as it has been found to not be helpful
for performance and in some cases harmful.
.
.It Sy zfs_vdev_mirror_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
immediately follows its predecessor on rotational vdevs.
.
.It Sy zfs_vdev_mirror_rotating_seek_inc Ns = Ns Sy 5 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this that are not immediately following the previous operation
are incremented by half.
.
.It Sy zfs_vdev_mirror_rotating_seek_offset Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum distance for the last queued I/O operation in which
the balancing algorithm considers an operation to have locality.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_mirror_non_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/O operations do not immediately follow one another.
.
.It Sy zfs_vdev_mirror_non_rotating_seek_inc Ns = Ns Sy 1 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation lacks
locality as defined by the
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this that are not immediately following the previous operation
are incremented by half.
.
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq int
Aggregate read I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq int
Aggregate write I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_raidz_impl Ns = Ns Sy fastest Pq string
Select the raidz parity implementation to use.
.Pp
Variants that don't depend on CPU-specific features
may be selected on module load, as they are supported on all systems.
The remaining options may only be set after the module is loaded,
as they are available only if the implementations are compiled in
and supported on the running system.
.Pp
Once the module is loaded,
.Pa /sys/module/zfs/parameters/zfs_vdev_raidz_impl
will show the available options,
with the currently selected one enclosed in square brackets.
.Pp
.TS
lb l l .
fastest selected by built-in benchmark
original original implementation
scalar scalar implementation
sse2 SSE2 instruction set 64-bit x86
ssse3 SSSE3 instruction set 64-bit x86
avx2 AVX2 instruction set 64-bit x86
avx512f AVX512F instruction set 64-bit x86
avx512bw AVX512F & AVX512BW instruction sets 64-bit x86
aarch64_neon NEON Aarch64/64-bit ARMv8
aarch64_neonx2 NEON with more unrolling Aarch64/64-bit ARMv8
powerpc_altivec Altivec PowerPC
.TE
.
.It Sy zfs_vdev_scheduler Pq charp
.Sy DEPRECATED .
Prints warning to kernel log for compatibility.
.
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq int
Max event queue length.
Events in the queue can be viewed with
.Xr zpool-events 8 .
.
.It Sy zfs_zevent_retain_max Ns = Ns Sy 2000 Pq int
Maximum recent zevent records to retain for duplicate checking.
Setting this to
.Sy 0
disables duplicate detection.
.
.It Sy zfs_zevent_retain_expire_secs Ns = Ns Sy 900 Ns s Po 15 min Pc Pq int
Lifespan for a recent ereport that was retained for duplicate checking.
.
.It Sy zfs_zil_clean_taskq_maxalloc Ns = Ns Sy 1048576 Pq int
The maximum number of taskq entries that are allowed to be cached.
When this limit is exceeded, transaction records (itxs)
will be cleaned synchronously.
.
.It Sy zfs_zil_clean_taskq_minalloc Ns = Ns Sy 1024 Pq int
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.
.It Sy zfs_zil_clean_taskq_nthr_pct Ns = Ns Sy 100 Ns % Pq int
This controls the number of threads used by
.Sy dp_zil_clean_taskq .
The default value of
.Sy 100%
will create a maximum of one thread per CPU.
.
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
This sets the maximum block size used by the ZIL.
On very fragmented pools, lowering this
.Pq typically to Sy 36 KiB
can improve performance.
.
.It Sy zil_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable the cache flush commands that are normally sent to disk by
the ZIL after an LWB write has completed.
Setting this will cause ZIL corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zil_replay_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable intent logging replay.
Can be disabled for recovery from corrupted ZIL.
.
.It Sy zil_slog_bulk Ns = Ns Sy 786432 Ns B Po 768 KiB Pc Pq ulong
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
.
.It Sy zfs_zil_saxattr Ns = Ns Sy 1 Ns | Ns 0 Pq int
Setting this tunable to zero disables ZIL logging of new
.Sy xattr Ns = Ns Sy sa
records if the
.Sy org.openzfs:zilsaxattr
feature is enabled on the pool.
This would only be necessary to work around bugs in the ZIL logging or replay
code for this record type.
The tunable has no effect if the feature is disabled.
.
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq int
Usually, one metaslab from each normal-class vdev is dedicated for use by
the ZIL to log synchronous writes.
However, if there are fewer than
.Sy zfs_embedded_slog_min_ms
metaslabs in the vdev, this functionality is disabled.
This ensures that we don't set aside an unreasonable amount of space for the ZIL.
.
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq int
Whether the heuristic for detecting incompressible data with zstd levels >= 3,
using LZ4 and zstd-1 passes, is enabled.
.
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq int
Minimum uncompressed size (inclusive) of a record before the early abort
heuristic will be attempted.
.
.It Sy zio_deadman_log_all Ns = Ns Sy 0 Ns | Ns 1 Pq int
If non-zero, the zio deadman will produce debugging messages
.Pq see Sy zfs_dbgmsg_enable
for all zios, rather than only for leaf zios possessing a vdev.
This is meant to be used by developers to gain
diagnostic information for hang conditions which don't involve a mutex
or other locking primitive: typically conditions in which a thread in
the zio pipeline is looping indefinitely.
.
.It Sy zio_slow_io_ms Ns = Ns Sy 30000 Ns ms Po 30 s Pc Pq int
When an I/O operation takes more than this much time to complete,
it's marked as slow.
Each slow operation causes a delay zevent.
Slow I/O counters can be seen with
.Nm zpool Cm status Fl s .
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
.
.It Sy zfs_xattr_compat Ns = Ns Sy 0 Ns | Ns 1 Pq int
Control the naming scheme used when setting new xattrs in the user namespace.
If
.Sy 0
.Pq the default on Linux ,
user namespace xattr names are prefixed with the namespace, to be backwards
compatible with previous versions of ZFS on Linux.
If
.Sy 1
.Pq the default on Fx ,
user namespace xattr names are not prefixed, to be backwards compatible with
previous versions of ZFS on illumos and
.Fx .
.Pp
Either naming scheme can be read on this and future versions of ZFS, regardless
of this tunable, but legacy ZFS on illumos or
.Fx
are unable to read user namespace xattrs written in the Linux format, and
legacy versions of ZFS on Linux are unable to read user namespace xattrs written
in the legacy ZFS format.
.Pp
An existing xattr with the alternate naming scheme is removed when overwriting
the xattr so as to not accumulate duplicates.
.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression and
checksum calculations.
A fractional number of CPUs is rounded down.
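.Pp
For example, on a system with 12 online CPUs the default of
.Sy 80%
yields
.Em 9
worker threads
.Pq 12 \(mu 0.8 = 9.6, rounded down .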
.Pp
The default value of
.Sy 80%
was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance,
especially when slower compression and/or checksumming is enabled.
.
.It Sy zio_taskq_batch_tpq Ns = Ns Sy 0 Pq uint
Number of worker threads per taskq.
Lower values improve I/O ordering and CPU utilization,
while higher values reduce lock contention.
.Pp
If
.Sy 0 ,
generate a system-dependent value close to 6 threads per taskq.
.
.It Sy zvol_inhibit_dev Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Do not create zvol device nodes.
This may slightly improve startup time on
systems with a very large number of zvols.
.
.It Sy zvol_major Ns = Ns Sy 230 Pq uint
Major number for zvol block devices.
.
.It Sy zvol_max_discard_blocks Ns = Ns Sy 16384 Pq ulong
Discard (TRIM) operations on zvols are issued in batches of this
many blocks, where the block size is determined by the
.Sy volblocksize
property of a zvol.
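.Pp
For example, with a
.Sy volblocksize
of 16 KiB, discards are issued in batches of at most
.Em 256 MiB
.Pq 16384 \(mu 16 KiB .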
.
.It Sy zvol_prefetch_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
When adding a zvol to the system, prefetch this many bytes
from the start and end of the volume.
Prefetching these regions of the volume is desirable,
because they are likely to be accessed immediately by
.Xr blkid 8
or the kernel partitioner.
.
.It Sy zvol_request_sync Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When processing I/O requests for a zvol, submit them synchronously.
This effectively limits the queue depth to
.Em 1
for each I/O submitter.
When unset, requests are handled asynchronously by a thread pool.
The number of requests which can be handled concurrently is controlled by
.Sy zvol_threads .
.Sy zvol_request_sync
is ignored when running on a kernel that supports block multiqueue
.Pq Li blk-mq .
.
.It Sy zvol_threads Ns = Ns Sy 0 Pq uint
The number of system-wide threads to use for processing zvol block IOs.
If
.Sy 0
(the default) then internally set
.Sy zvol_threads
to the number of CPUs present or 32 (whichever is greater).
.
.It Sy zvol_blk_mq_threads Ns = Ns Sy 0 Pq uint
The number of threads per zvol to use for queuing IO requests.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
If
.Sy 0
(the default) then internally set
.Sy zvol_blk_mq_threads
to the number of CPUs present.
.
.It Sy zvol_use_blk_mq Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Set to
.Sy 1
to use the
.Li blk-mq
API for zvols.
Set to
.Sy 0
(the default) to use the legacy zvol APIs.
This setting can give better or worse zvol performance depending on
the workload.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
.
.It Sy zvol_blk_mq_blocks_per_thread Ns = Ns Sy 8 Pq uint
If
.Sy zvol_use_blk_mq
is enabled, then process this number of
.Sy volblocksize Ns -sized blocks per zvol thread.
This tunable can be used to favor better performance for zvol reads (lower
values) or writes (higher values).
If set to
.Sy 0 ,
then the zvol layer will process the maximum number of blocks
per thread that it can.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
.
.It Sy zvol_blk_mq_queue_depth Ns = Ns Sy 0 Pq uint
The queue_depth value for the zvol
.Li blk-mq
interface.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
If
.Sy 0
(the default) then use the kernel's default queue depth.
Values are clamped to the kernel's
.Dv BLKDEV_MIN_RQ
and
.Dv BLKDEV_MAX_RQ Ns / Ns Dv BLKDEV_DEFAULT_RQ
limits.
.
.It Sy zvol_volmode Ns = Ns Sy 1 Pq uint
Defines zvol block device behaviour when
.Sy volmode Ns = Ns Sy default :
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
.No equivalent to Sy full
.It Sy 2
.No equivalent to Sy dev
.It Sy 3
.No equivalent to Sy none
.El
.El
.
.Sh ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs to satisfy and complete I/O requests.
The scheduler determines when and in what order those operations are issued.
The scheduler divides operations into five I/O classes,
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver.
Each queue defines the minimum and maximum number of concurrent operations
that may be issued to the device.
In addition, the device has an aggregate maximum,
.Sy zfs_vdev_max_active .
Note that the sum of the per-queue minima must not exceed the aggregate maximum.
If the sum of the per-queue maxima exceeds the aggregate maximum,
then the number of active operations may reach
.Sy zfs_vdev_max_active ,
in which case no further operations will be issued,
regardless of whether all per-queue minima have been met.
.Pp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers.
Furthermore, physical devices typically have a limit
at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.Pp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied.
Once all are satisfied and the aggregate maximum has not been hit,
the scheduler looks for classes whose maximum has not been satisfied.
Iteration through the I/O classes is done in the order specified above.
No further operations are issued
if the aggregate maximum number of concurrent operations has been hit,
or if there are no operations queued for an I/O class that has not hit its maximum.
Every time an I/O operation is queued or an operation completes,
the scheduler looks for new operations to issue.
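.Pp
The selection logic described above can be sketched, purely as an
illustration, by the following Python fragment; the class and field names are
invented for clarity and this is not the in-kernel
.Pa vdev_queue.c
implementation:
.Bd -literal
from dataclasses import dataclass

@dataclass
class IOClass:
    name: str
    min_active: int
    max_active: int
    active: int = 0      # operations currently issued to the device
    pending: int = 0     # operations waiting in this class's queue

def select_class(classes, aggregate_max):
    """Pick the next class to issue from; classes are in priority order."""
    if sum(c.active for c in classes) >= aggregate_max:
        return None                  # zfs_vdev_max_active reached
    for c in classes:                # first satisfy each class's minimum
        if c.pending and c.active < c.min_active:
            return c
    for c in classes:                # then fill classes up to their maximum
        if c.pending and c.active < c.max_active:
            return c
    return None

# Example: sync reads are preferred until their minimum is satisfied.
classes = [
    IOClass("sync_read", 10, 10, active=3, pending=5),
    IOClass("async_write", 2, 10, active=1, pending=50),
]
print(select_class(classes, aggregate_max=1000).name)   # sync_read
.Ed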
.Pp
In general, smaller
.Sy max_active Ns s
will lead to lower latency of synchronous operations.
Larger
.Sy max_active Ns s
may lead to higher overall throughput, depending on underlying storage.
.Pp
The ratio of the queues'
.Sy max_active Ns s
determines the balance of performance between reads, writes, and scrubs.
For example, increasing
.Sy zfs_vdev_scrub_max_active
will cause the scrub or resilver to complete more quickly,
but reads and writes to have higher latency and lower throughput.
.Pp
All I/O classes have a fixed maximum number of outstanding operations,
except for the async write class.
Asynchronous writes represent the data that is committed to stable storage
during the syncing stage for transaction groups.
Transaction groups enter the syncing state periodically,
so the number of queued async writes will quickly burst up
and then bleed down to zero.
Rather than servicing them as quickly as possible,
the I/O scheduler changes the maximum number of active async write operations
according to the amount of dirty data in the pool.
Since both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of concurrent operations also stabilizes the
response time of operations from other – and in particular synchronous – queues.
In broad strokes, the I/O scheduler will issue more concurrent operations
from the async write queue as there's more dirty data in the pool.
.
.Ss Async Writes
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points:
.Bd -literal
| o---------| <-- \fBzfs_vdev_async_write_max_active\fP
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- \fBzfs_vdev_async_write_min_active\fP
0|_______^______|_________|
0% | | 100% of \fBzfs_dirty_data_max\fP
| |
| `-- \fBzfs_vdev_async_write_active_max_dirty_percent\fP
`--------- \fBzfs_vdev_async_write_active_min_dirty_percent\fP
.Ed
.Pp
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum.
As that threshold is crossed, the number of concurrent operations issued
increases linearly to the maximum at the specified maximum percentage
of the dirty data allowed in the pool.
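.Pp
As a rough illustration, the piece-wise linear function pictured above can be
written as follows; the thresholds and limits passed in the example call are
illustrative values, not the defaults:
.Bd -literal
def async_write_limit(dirty, dirty_max,
                      min_active, max_active, min_pct, max_pct):
    """Sketch of the piece-wise linear limit on active async writes."""
    pct = 100.0 * dirty / dirty_max
    if pct <= min_pct:               # below the minimum dirty percentage
        return min_active
    if pct >= max_pct:               # above the maximum dirty percentage
        return max_active
    span = (pct - min_pct) / (max_pct - min_pct)
    return int(min_active + span * (max_active - min_active))

# Halfway between 30% and 60% dirty, with limits of 2 and 10 operations:
print(async_write_limit(45, 100, 2, 10, 30, 60))   # 6
.Ed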
.Pp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between
.Sy zfs_vdev_async_write_active_min_dirty_percent
and
.Sy zfs_vdev_async_write_active_max_dirty_percent .
If it exceeds the maximum percentage,
this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle.
In this case, we must further throttle incoming writes,
as described in the next section.
.
.Sh ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.Pp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting.
This way the calculated delay time
is independent of the number of threads concurrently executing transactions.
.Pp
If we are the only waiter, wait relative to when the transaction started,
rather than the current time.
This credits the transaction for "time already served",
e.g. reading indirect blocks.
.Pp
The minimum time for a transaction to take is calculated as
.D1 min_time = min( Ns Sy zfs_delay_scale No \(mu Po Sy dirty No \- Sy min Pc / Po Sy max No \- Sy dirty Pc , 100ms)
.Pp
The delay has two degrees of freedom that can be adjusted via tunables.
The percentage of dirty data at which we start to delay is defined by
.Sy zfs_delay_min_dirty_percent .
This should typically be at or above
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
so that we only start to delay after writing at full speed
has failed to keep up with the incoming write rate.
The scale of the curve is defined by
.Sy zfs_delay_scale .
Roughly speaking, this variable determines the amount of delay at the midpoint of the curve.
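.Pp
For example, at the point where the amount of dirty data lies exactly halfway
between the threshold and the limit, so that
.Pq Sy dirty No \- Sy min
equals
.Pq Sy max No \- Sy dirty ,
the expression above reduces to
.D1 min_time = Sy zfs_delay_scale
which, with the default
.Sy zfs_delay_scale
of 500000 ns, is the
.Em 500 us
midpoint shown in the diagrams below.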
.Bd -literal
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| \fBzfs_delay_scale\fP ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note that, since the delay is added to the outstanding time remaining on the
most recent transaction, it's effectively the inverse of IOPS.
Here, the midpoint of
.Em 500 us
translates to
.Em 2000 IOPS .
The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first three quarters of the curve yield relatively small differences
in the amount of delay.
.Pp
The effects can be easier to understand when the amount of delay is
represented on a logarithmic scale:
.Bd -literal
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ \fBzfs_delay_scale\fP ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly.
The goal of a properly tuned system should be to keep the amount of dirty data
out of that range by first ensuring that the appropriate limits are set
for the I/O scheduler to reach optimal throughput on the back-end storage,
and then by changing the value of
.Sy zfs_delay_scale
to increase the steepness of the curve.
diff --git a/sys/contrib/openzfs/man/man7/zpoolprops.7 b/sys/contrib/openzfs/man/man7/zpoolprops.7
index a150f6d43706..2164c1260111 100644
--- a/sys/contrib/openzfs/man/man7/zpoolprops.7
+++ b/sys/contrib/openzfs/man/man7/zpoolprops.7
@@ -1,417 +1,417 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
.Dd May 27, 2021
.Dt ZPOOLPROPS 7
.Os
.
.Sh NAME
.Nm zpoolprops
.Nd properties of ZFS storage pools
.
.Sh DESCRIPTION
Each pool has several properties associated with it.
Some properties are read-only statistics while others are configurable and
change the behavior of the pool.
.Pp
The following are read-only properties:
.Bl -tag -width "unsupported@guid"
.It Sy allocated
Amount of storage used within the pool.
See
.Sy fragmentation
and
.Sy free
for more information.
.It Sy capacity
Percentage of pool space used.
This property can also be referred to by its shortened column name,
.Sy cap .
.It Sy expandsize
Amount of uninitialized space within the pool or device that can be used to
increase the total capacity of the pool.
On whole-disk vdevs, this is the space beyond the end of the GPT –
typically occurring when a LUN is dynamically expanded
or a disk replaced with a larger one.
On partition vdevs, this is the space appended to the partition after it was
added to the pool – most likely by resizing it in-place.
The space can be claimed for the pool by bringing it online with
.Sy autoexpand=on
or using
.Nm zpool Cm online Fl e .
.It Sy fragmentation
The amount of fragmentation in the pool.
As the amount of space
.Sy allocated
increases, it becomes more difficult to locate
.Sy free
space.
This may result in lower write performance compared to pools with more
unfragmented free space.
.It Sy free
The amount of free space available in the pool.
By contrast, the
.Xr zfs 8
.Sy available
property describes how much new data can be written to ZFS filesystems/volumes.
The zpool
.Sy free
property is not generally useful for this purpose, and can be substantially more than the zfs
.Sy available
space.
This discrepancy is due to several factors, including raidz parity;
zfs reservation, quota, refreservation, and refquota properties; and space set aside by
.Sy spa_slop_shift
(see
.Xr zfs 4
for more information).
.It Sy freeing
After a file system or snapshot is destroyed, the space it was using is
returned to the pool asynchronously.
.Sy freeing
is the amount of space remaining to be reclaimed.
Over time
.Sy freeing
will decrease while
.Sy free
increases.
.It Sy leaked
Space not released while
.Sy freeing
due to corruption, now permanently leaked into the pool.
.It Sy health
The current health of the pool.
Health can be one of
.Sy ONLINE , DEGRADED , FAULTED , OFFLINE , REMOVED , UNAVAIL .
.It Sy guid
A unique identifier for the pool.
.It Sy load_guid
A unique identifier for the pool.
Unlike the
.Sy guid
property, this identifier is generated every time we load the pool (i.e. does
not persist across imports/exports) and never changes while the pool is loaded
(even if a
.Sy reguid
operation takes place).
.It Sy size
Total size of the storage pool.
.It Sy unsupported@ Ns Em guid
Information about unsupported features that are enabled on the pool.
See
.Xr zpool-features 7
for details.
.El
.Pp
The space usage properties report actual physical space available to the
storage pool.
The physical space can be different from the total amount of space that any
contained datasets can actually use.
The amount of space used in a raidz configuration depends on the characteristics
of the data being written.
In addition, ZFS reserves some space for internal accounting that the
.Xr zfs 8
command takes into account, but the
.Nm
command does not.
For non-full pools of a reasonable size, these effects should be invisible.
For small pools, or pools that are close to being completely full, these
discrepancies may become more noticeable.
.Pp
The following property can be set at creation time and import time:
.Bl -tag -width Ds
.It Sy altroot
Alternate root directory.
If set, this directory is prepended to any mount points within the pool.
This can be used when examining an unknown pool where the mount points cannot be
trusted, or in an alternate boot environment, where the typical paths are not
valid.
.Sy altroot
is not a persistent property.
It is valid only while the system is up.
Setting
.Sy altroot
defaults to using
.Sy cachefile Ns = Ns Sy none ,
though this may be overridden using an explicit setting.
.El
.Pp
The following property can be set only at import time:
.Bl -tag -width Ds
.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
If set to
.Sy on ,
the pool will be imported in read-only mode.
This property can also be referred to by its shortened column name,
.Sy rdonly .
.El
.Pp
The following properties can be set at creation time and import time, and later
changed with the
.Nm zpool Cm set
command:
.Bl -tag -width Ds
-.It Sy ashift Ns = Ns Sy ashift
+.It Sy ashift Ns = Ns Ar ashift
Pool sector size exponent, to the power of
.Sy 2
(internally referred to as
.Sy ashift ) .
Values from 9 to 16, inclusive, are valid; also, the
value 0 (the default) means to auto-detect using the kernel's block
layer and a ZFS internal exception list.
I/O operations will be aligned to the specified size boundaries.
Additionally, the minimum (disk)
write size will be set to the specified size, so this represents a
space/performance trade-off.
For optimal performance, the pool sector size should be greater than
or equal to the sector size of the underlying disks.
The typical case for setting this property is when
performance is important and the underlying disks use 4KiB sectors but
report 512B sectors to the OS (for compatibility reasons); in that
case, set
.Sy ashift Ns = Ns Sy 12
(which is
.Sy 1<<12 No = Sy 4096 ) .
When set, this property is
used as the default hint value in subsequent vdev operations (add,
attach and replace).
Changing this value will not modify any existing
vdev, not even on disk replacement; however, it can be used, for
instance, to replace a dying 512B-sector disk with a newer 4KiB-sector
device: this will probably result in bad performance, but at the
same time could prevent loss of data.
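.Pp
For example, a pool backed by disks that use 4 KiB physical sectors could be
created with an explicit hint (the pool and device names below are only
placeholders):
.Dl # zpool create -o ashift=12 tank /dev/da0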
.It Sy autoexpand Ns = Ns Sy on Ns | Ns Sy off
Controls automatic pool expansion when the underlying LUN is grown.
If set to
.Sy on ,
the pool will be resized according to the size of the expanded device.
If the device is part of a mirror or raidz then all devices within that
mirror/raidz group must be expanded before the new space is made available to
the pool.
The default behavior is
.Sy off .
This property can also be referred to by its shortened column name,
.Sy expand .
.It Sy autoreplace Ns = Ns Sy on Ns | Ns Sy off
Controls automatic device replacement.
If set to
.Sy off ,
device replacement must be initiated by the administrator by using the
.Nm zpool Cm replace
command.
If set to
.Sy on ,
any new device, found in the same physical location as a device that previously
belonged to the pool, is automatically formatted and replaced.
The default behavior is
.Sy off .
This property can also be referred to by its shortened column name,
.Sy replace .
Autoreplace can also be used with virtual disks (like device
mapper) provided that you use the /dev/disk/by-vdev paths set up by
vdev_id.conf.
See the
.Xr vdev_id 8
manual page for more details.
Autoreplace and autoonline require the ZFS Event Daemon be configured and
running.
See the
.Xr zed 8
manual page for more details.
.It Sy autotrim Ns = Ns Sy on Ns | Ns Sy off
When set to
.Sy on
space which has been recently freed, and is no longer allocated by the pool,
will be periodically trimmed.
This allows block device vdevs which support
BLKDISCARD, such as SSDs, or file vdevs on which the underlying file system
supports hole-punching, to reclaim unused blocks.
The default value for this property is
.Sy off .
.Pp
Automatic TRIM does not immediately reclaim blocks after a free.
Instead, it will optimistically delay allowing smaller ranges to be aggregated
into a few larger ones.
These can then be issued more efficiently to the storage.
TRIM on L2ARC devices is enabled by setting
.Sy l2arc_trim_ahead > 0 .
.Pp
Be aware that automatic trimming of recently freed data blocks can put
significant stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
For lower-end devices it is often possible to achieve most of the benefits
of automatic trimming by running an on-demand (manual) TRIM periodically
using the
.Nm zpool Cm trim
command.
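.Pp
As an illustration, continuous trimming could be enabled on an existing pool,
or an on-demand pass run instead, with commands along these lines (the pool
name is a placeholder):
.Dl # zpool set autotrim=on tank
.Dl # zpool trim tank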
.It Sy bootfs Ns = Ns Sy (unset) Ns | Ns Ar pool Ns Op / Ns Ar dataset
Identifies the default bootable dataset for the root pool.
This property is expected to be set mainly by the installation and upgrade programs.
Not all Linux distribution boot processes use the bootfs property.
.It Sy cachefile Ns = Ns Ar path Ns | Ns Sy none
Controls the location where the pool configuration is cached.
Discovering all pools on system startup requires a cached copy of the
configuration data that is stored on the root file system.
All pools in this cache are automatically imported when the system boots.
Some environments, such as install and clustering, need to cache this
information in a different location so that pools are not automatically
imported.
Setting this property caches the pool configuration in a different location that
can later be imported with
.Nm zpool Cm import Fl c .
Setting it to the value
.Sy none
creates a temporary pool that is never cached, and the
.Qq
.Pq empty string
uses the default location.
.Pp
Multiple pools can share the same cache file.
Because the kernel destroys and recreates this file when pools are added and
removed, care should be taken when attempting to access this file.
When the last pool using a
.Sy cachefile
is exported or destroyed, the file will be empty.
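.Pp
For instance, a pool used only by a failover service might be cached in a
dedicated file and later imported from it (the path and pool name below are
illustrative):
.Dl # zpool set cachefile=/etc/zfs/cluster.cache tank
.Dl # zpool import -c /etc/zfs/cluster.cache tank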
.It Sy comment Ns = Ns Ar text
A text string consisting of printable ASCII characters that will be stored
such that it is available even if the pool becomes faulted.
An administrator can provide additional information about a pool using this
property.
.It Sy compatibility Ns = Ns Sy off Ns | Ns Sy legacy Ns | Ns Ar file Ns Oo , Ns Ar file Oc Ns …
Specifies that the pool maintain compatibility with specific feature sets.
When set to
.Sy off
(or unset) compatibility is disabled (all features may be enabled); when set to
.Sy legacy ,
no features may be enabled.
When set to a comma-separated list of filenames
(each filename may either be an absolute path, or relative to
.Pa /etc/zfs/compatibility.d
or
.Pa /usr/share/zfs/compatibility.d )
the lists of requested features are read from those files, separated by
whitespace and/or commas.
Only features present in all files may be enabled.
.Pp
See
.Xr zpool-features 7 ,
.Xr zpool-create 8
and
.Xr zpool-upgrade 8
for more information on the operation of compatibility feature sets.
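.Pp
As a simple illustration, a pool could be restricted to the legacy feature set
at creation time and relaxed again later (the pool and device names are
placeholders):
.Dl # zpool create -o compatibility=legacy tank /dev/da0
.Dl # zpool set compatibility=off tank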
.It Sy dedupditto Ns = Ns Ar number
This property is deprecated and no longer has any effect.
.It Sy delegation Ns = Ns Sy on Ns | Ns Sy off
Controls whether a non-privileged user is granted access based on the
permissions defined on the dataset.
See
.Xr zfs 8
for more information on ZFS delegated administration.
.It Sy failmode Ns = Ns Sy wait Ns | Ns Sy continue Ns | Ns Sy panic
Controls the system behavior in the event of catastrophic pool failure.
This condition is typically a result of a loss of connectivity to the underlying
storage device(s) or a failure of all devices within the pool.
The behavior of such an event is determined as follows:
.Bl -tag -width "continue"
.It Sy wait
Blocks all I/O access until the device connectivity is recovered and the errors
are cleared with
.Nm zpool Cm clear .
This is the default behavior.
.It Sy continue
Returns
.Er EIO
to any new write I/O requests but allows reads to any of the remaining healthy
devices.
Any write requests that have yet to be committed to disk would be blocked.
.It Sy panic
Prints out a message to the console and generates a system crash dump.
.El
.It Sy feature@ Ns Ar feature_name Ns = Ns Sy enabled
The value of this property is the current state of
.Ar feature_name .
The only valid value when setting this property is
.Sy enabled
which moves
.Ar feature_name
to the enabled state.
See
.Xr zpool-features 7
for details on feature states.
.It Sy listsnapshots Ns = Ns Sy on Ns | Ns Sy off
Controls whether information about snapshots associated with this pool is
output when
.Nm zfs Cm list
is run without the
.Fl t
option.
The default value is
.Sy off .
This property can also be referred to by its shortened name,
.Sy listsnaps .
.It Sy multihost Ns = Ns Sy on Ns | Ns Sy off
Controls whether a pool activity check should be performed during
.Nm zpool Cm import .
When a pool is determined to be active it cannot be imported, even with the
.Fl f
option.
This property is intended to be used in failover configurations
where multiple hosts have access to a pool on shared storage.
.Pp
Multihost provides protection on import only.
It does not protect against an
individual device being used in multiple pools, regardless of the type of vdev.
See the discussion under
.Nm zpool Cm create .
.Pp
When this property is on, periodic writes to storage occur to show the pool is
in use.
See
.Sy zfs_multihost_interval
in the
.Xr zfs 4
manual page.
In order to enable this property, each host must set a unique hostid.
See
.Xr genhostid 1 ,
.Xr zgenhostid 8 ,
and
.Xr spl 4
for additional details.
The default value is
.Sy off .
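.Pp
On a typical failover node, this might be enabled by first generating a
hostid and then turning the activity check on (the pool name is a
placeholder):
.Dl # zgenhostid
.Dl # zpool set multihost=on tank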
.It Sy version Ns = Ns Ar version
The current on-disk version of the pool.
This can be increased, but never decreased.
The preferred method of updating pools is with the
.Nm zpool Cm upgrade
command, though this property can be used when a specific version is needed for
backwards compatibility.
Once feature flags are enabled on a pool this property will no longer have a
value.
.El
diff --git a/sys/contrib/openzfs/man/man8/zfs-program.8 b/sys/contrib/openzfs/man/man8/zfs-program.8
index 06415b2190eb..928620362be7 100644
--- a/sys/contrib/openzfs/man/man8/zfs-program.8
+++ b/sys/contrib/openzfs/man/man8/zfs-program.8
@@ -1,631 +1,644 @@
.\"
.\" This file and its contents are supplied under the terms of the
.\" Common Development and Distribution License ("CDDL"), version 1.0.
.\" You may only use this file in accordance with the terms of version
.\" 1.0 of the CDDL.
.\"
.\" A full copy of the text of the CDDL should have accompanied this
.\" source. A copy of the CDDL is also available via the Internet at
.\" http://www.illumos.org/license/CDDL.
.\"
.\" Copyright (c) 2016, 2019 by Delphix. All Rights Reserved.
.\" Copyright (c) 2019, 2020 by Christian Schwarz. All Rights Reserved.
.\" Copyright 2020 Joyent, Inc.
.\"
.Dd May 27, 2021
.Dt ZFS-PROGRAM 8
.Os
.
.Sh NAME
.Nm zfs-program
.Nd execute ZFS channel programs
.Sh SYNOPSIS
.Nm zfs
.Cm program
.Op Fl jn
.Op Fl t Ar instruction-limit
.Op Fl m Ar memory-limit
.Ar pool
.Ar script
.Op Ar script arguments
.
.Sh DESCRIPTION
The ZFS channel program interface allows ZFS administrative operations to be
run programmatically as a Lua script.
The entire script is executed atomically, with no other administrative
operations taking effect concurrently.
A library of ZFS calls is made available to channel program scripts.
Channel programs may only be run with root privileges.
.Pp
A modified version of the Lua 5.2 interpreter is used to run channel program
scripts.
The Lua 5.2 manual can be found at
.Lk http://www.lua.org/manual/5.2/
.Pp
The channel program given by
.Ar script
will be run on
.Ar pool ,
and any attempts to access or modify other pools will cause an error.
.
.Sh OPTIONS
.Bl -tag -width "-t"
.It Fl j
Display channel program output in JSON format.
When this flag is specified and standard output is empty,
the channel program encountered an error.
The details of such an error will be printed to standard error in plain text.
.It Fl n
Executes a read-only channel program, which runs faster.
The program cannot change on-disk state by calling functions from the
zfs.sync submodule.
The program can be used to gather information such as properties and
to determine whether changes would succeed (zfs.check.*).
Without this flag, all pending changes must be synced to disk before a
channel program can complete.
.It Fl t Ar instruction-limit
Limit the number of Lua instructions to execute.
If a channel program executes more than the specified number of instructions,
it will be stopped and an error will be returned.
The default limit is 10 million instructions, and it can be set to a maximum of
100 million instructions.
.It Fl m Ar memory-limit
Memory limit, in bytes.
If a channel program attempts to allocate more memory than the given limit, it
will be stopped and an error returned.
The default memory limit is 10 MiB, and can be set to a maximum of 100 MiB.
.El
.Pp
All remaining argument strings will be passed directly to the Lua script as
described in the
.Sx LUA INTERFACE
section below.
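.Pp
For illustration, a read-only dry run of a script against a single pool, with
one extra argument passed through to the script, might look like this (the
script name and argument are placeholders):
.Dl # zfs program -n rpool ./cleanup.lua rpool/tmp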
.
.Sh LUA INTERFACE
A channel program can be invoked either from the command line, or via a library
call to
.Fn lzc_channel_program .
.
.Ss Arguments
Arguments passed to the channel program are converted to a Lua table.
If invoked from the command line, extra arguments to the Lua script will be
accessible as an array stored in the argument table with the key 'argv':
.Bd -literal -compact -offset indent
args = ...
argv = args["argv"]
-- argv == {1="arg1", 2="arg2", ...}
.Ed
.Pp
If invoked from the libzfs interface, an arbitrary argument list can be
passed to the channel program, which is accessible via the same
.Qq Li ...
syntax in Lua:
.Bd -literal -compact -offset indent
args = ...
-- args == {"foo"="bar", "baz"={...}, ...}
.Ed
.Pp
Note that because Lua arrays are 1-indexed, arrays passed to Lua from the
libzfs interface will have their indices incremented by 1.
That is, the element
in
.Va arr[0]
in a C array passed to a channel program will be stored in
.Va arr[1]
when accessed from Lua.
.
.Ss Return Values
Lua return statements take the form:
.Dl return ret0, ret1, ret2, ...
.Pp
Return statements returning multiple values are permitted internally in a
channel program script, but attempting to return more than one value from the
top level of the channel program is not permitted and will throw an error.
However, tables containing multiple values can still be returned.
If invoked from the command line, a return statement:
.Bd -literal -compact -offset indent
a = {foo="bar", baz=2}
return a
.Ed
.Pp
Will be output formatted as:
.Bd -literal -compact -offset indent
Channel program fully executed with return value:
return:
baz: 2
foo: 'bar'
.Ed
.
.Ss Fatal Errors
If the channel program encounters a fatal error while running, a non-zero exit
status will be returned.
If more information about the error is available, a singleton list will be
returned detailing the error:
.Dl error: \&"error string, including Lua stack trace"
.Pp
If a fatal error is returned, the channel program may not have executed at all,
may have partially executed, or may have fully executed but failed to pass a
return value back to userland.
.Pp
If the channel program exhausts an instruction or memory limit, a fatal error
will be generated and the program will be stopped, leaving the program partially
executed.
No attempt is made to reverse or undo any operations already performed.
Note that because both the instruction count and amount of memory used by a
channel program are deterministic when run against the same inputs and
filesystem state, as long as a channel program has run successfully once, you
can guarantee that it will finish successfully against a similar size system.
.Pp
If a channel program attempts to return too large a value, the program will
fully execute but exit with a nonzero status code and no return value.
.Pp
.Em Note :
ZFS API functions do not generate Fatal Errors when correctly invoked; they
return an error code and the channel program continues executing.
See the
.Sx ZFS API
section below for function-specific details on error return codes.
.
.Ss Lua to C Value Conversion
When invoking a channel program via the libzfs interface, it is necessary to
translate arguments and return values from Lua values to their C equivalents,
and vice-versa.
.Pp
There is a correspondence between nvlist values in C and Lua tables.
A Lua table which is returned from the channel program will be recursively
converted to an nvlist, with table values converted to their natural
equivalents:
.TS
cw3 l c l .
string -> string
number -> int64
boolean -> boolean_value
nil -> boolean (no value)
table -> nvlist
.TE
.Pp
Likewise, table keys are replaced by string equivalents as follows:
.TS
cw3 l c l .
string -> no change
number -> signed decimal string ("%lld")
boolean -> "true" | "false"
.TE
.Pp
Any collision of table key strings (for example, the string "true" and a
true boolean value) will cause a fatal error.
.Pp
Lua numbers are represented internally as signed 64-bit integers.
.
.Sh LUA STANDARD LIBRARY
The following Lua built-in base library functions are available:
.TS
cw3 l l l l .
assert rawlen collectgarbage rawget
error rawset getmetatable select
ipairs setmetatable next tonumber
pairs tostring rawequal type
.TE
.Pp
All functions in the
.Em coroutine ,
.Em string ,
and
.Em table
built-in submodules are also available.
A complete list and documentation of these modules is available in the Lua
manual.
.Pp
The following base library functions have been disabled and are
not available for use in channel programs:
.TS
cw3 l l l l l l .
dofile loadfile load pcall print xpcall
.TE
.
.Sh ZFS API
.
.Ss Function Arguments
Each API function takes a fixed set of required positional arguments and
optional keyword arguments.
For example, the destroy function takes a single positional string argument
(the name of the dataset to destroy) and an optional "defer" keyword boolean
argument.
When using parentheses to specify the arguments to a Lua function, only
positional arguments can be used:
.Dl Sy zfs.sync.destroy Ns Pq \&"rpool@snap"
.Pp
To use keyword arguments, functions must be called with a single argument that
is a Lua table containing entries mapping integers to positional arguments and
strings to keyword arguments:
.Dl Sy zfs.sync.destroy Ns Pq {1="rpool@snap", defer=true}
.Pp
The Lua language allows curly braces to be used in place of parentheses as
syntactic sugar for this calling convention:
.Dl Sy zfs.sync.destroy Ns {"rpool@snap", defer=true}
.
.Ss Function Return Values
If an API function succeeds, it returns 0.
If it fails, it returns an error code and the channel program continues
executing.
API functions do not generate Fatal Errors except in the case of an
unrecoverable internal file system error.
.Pp
In addition to returning an error code, some functions also return extra
details describing what caused the error.
This extra description is given as a second return value, and will always be a
Lua table, or Nil if no error details were returned.
Different keys will exist in the error details table depending on the function
and error case.
Any such function may be called expecting a single return value:
.Dl errno = Sy zfs.sync.promote Ns Pq dataset
.Pp
Or, the error details can be retrieved:
.Bd -literal -compact -offset indent
.No errno, details = Sy zfs.sync.promote Ns Pq dataset
if (errno == EEXIST) then
assert(details ~= Nil)
list_of_conflicting_snapshots = details
end
.Ed
.Pp
The following global aliases for API function error return codes are defined
for use in channel programs:
.TS
cw3 l l l l l l l .
EPERM ECHILD ENODEV ENOSPC ENOENT EAGAIN ENOTDIR
ESPIPE ESRCH ENOMEM EISDIR EROFS EINTR EACCES
EINVAL EMLINK EIO EFAULT ENFILE EPIPE ENXIO
ENOTBLK EMFILE EDOM E2BIG EBUSY ENOTTY ERANGE
ENOEXEC EEXIST ETXTBSY EDQUOT EBADF EXDEV EFBIG
.TE
.
.Ss API Functions
For detailed descriptions of the exact behavior of any ZFS administrative
operations, see the main
.Xr zfs 8
manual page.
.Bl -tag -width "xx"
.It Fn zfs.debug msg
Record a debug message in the zfs_dbgmsg log.
A log of these messages can be printed via mdb's "::zfs_dbgmsg" command, or
can be monitored live by running
.Dl dtrace -n 'zfs-dbgmsg{trace(stringof(arg0))}'
.Pp
.Bl -tag -compact -width "property (string)"
.It Ar msg Pq string
Debug message to be printed.
.El
.It Fn zfs.exists dataset
Returns true if the given dataset exists, or false if it doesn't.
A fatal error will be thrown if the dataset is not in the target pool.
That is, in a channel program running on rpool,
.Sy zfs.exists Ns Pq \&"rpool/nonexistent_fs"
returns false, but
.Sy zfs.exists Ns Pq \&"somepool/fs_that_may_exist"
will error.
.Pp
.Bl -tag -compact -width "property (string)"
.It Ar dataset Pq string
Dataset to check for existence.
Must be in the target pool.
.El
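.Pp
A minimal sketch of guarding later operations on a dataset whose presence is
uncertain (the dataset name is illustrative):
.Bd -literal -compact -offset indent
if zfs.exists("rpool/home") then
    zfs.debug("rpool/home is present")
end
.Ed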
.It Fn zfs.get_prop dataset property
Returns two values.
First, a string, number or table containing the property value for the given
dataset.
Second, a string containing the source of the property (i.e. the name of the
dataset in which it was set or nil if it is readonly).
Throws a Lua error if the dataset is invalid or the property doesn't exist.
Note that Lua only supports int64 number types whereas ZFS number properties
are uint64.
This means very large values (like GUIDs) may wrap around and appear negative.
.Pp
.Bl -tag -compact -width "property (string)"
.It Ar dataset Pq string
Filesystem or snapshot path to retrieve properties from.
.It Ar property Pq string
Name of property to retrieve.
All filesystem, snapshot and volume properties are supported except for
.Sy mounted
and
.Sy iscsioptions .
Also supports the
.Sy written@ Ns Ar snap
and
.Sy written# Ns Ar bookmark
properties and the
.Ao Sy user Ns | Ns Sy group Ac Ns Ao Sy quota Ns | Ns Sy used Ac Ns Sy @ Ns Ar id
properties, though the id must be in numeric form.
.El
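.Pp
For example, reading a property together with its source might look like this
(the dataset and property names are illustrative):
.Bd -literal -compact -offset indent
compr, source = zfs.get_prop("rpool/home", "compression")
-- e.g. compr == "lz4", source == "rpool" when inherited from the pool root
.Ed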
.El
.Bl -tag -width "xx"
.It Sy zfs.sync submodule
The sync submodule contains functions that modify the on-disk state.
They are executed in "syncing context".
.Pp
The available sync submodule functions are as follows:
.Bl -tag -width "xx"
.It Sy zfs.sync.destroy Ns Pq Ar dataset , Op Ar defer Ns = Ns Sy true Ns | Ns Sy false
Destroy the given dataset.
Returns 0 on successful destroy, or a nonzero error code if the dataset could
not be destroyed (for example, if the dataset has any active children or
clones).
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar dataset Pq string
Filesystem or snapshot to be destroyed.
.It Op Ar defer Pq boolean
Valid only for destroying snapshots.
If set to true, and the snapshot has holds or clones, allows the snapshot to be
marked for deferred deletion rather than failing.
.El
.It Fn zfs.sync.inherit dataset property
Clears the specified property in the given dataset, causing it to be inherited
from an ancestor, or restored to the default if no ancestor property is set.
The
.Nm zfs Cm inherit Fl S
option has not been implemented.
Returns 0 on success, or a nonzero error code if the property could not be
cleared.
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar dataset Pq string
Filesystem or snapshot containing the property to clear.
.It Ar property Pq string
The property to clear.
Allowed properties are the same as those for the
.Nm zfs Cm inherit
command.
.El
.It Fn zfs.sync.promote dataset
Promote the given clone to a filesystem.
Returns 0 on successful promotion, or a nonzero error code otherwise.
If EEXIST is returned, the second return value will be an array of the clone's
snapshots whose names collide with snapshots of the parent filesystem.
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar dataset Pq string
Clone to be promoted.
.El
.It Fn zfs.sync.rollback filesystem
Roll back to the previous snapshot for a dataset.
Returns 0 on successful rollback, or a nonzero error code otherwise.
Rollbacks can be performed on filesystems or zvols, but not on snapshots
or mounted datasets.
EBUSY is returned in the case where the filesystem is mounted.
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar filesystem Pq string
Filesystem to roll back.
.El
.It Fn zfs.sync.set_prop dataset property value
Sets the given property on a dataset.
Currently only user properties are supported.
Returns 0 if the property was set, or a nonzero error code otherwise.
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar dataset Pq string
The dataset where the property will be set.
.It Ar property Pq string
The property to set.
.It Ar value Pq string
The value of the property to be set.
.El
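.Pp
A brief sketch, using a made-up user property name (only user properties are
currently supported):
.Bd -literal -compact -offset indent
err = zfs.sync.set_prop("rpool/data", "com.example:state", "frozen")
if (err ~= 0) then
    zfs.debug("set_prop failed: " .. err)
end
.Ed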
.It Fn zfs.sync.snapshot dataset
Create a snapshot of a filesystem.
Returns 0 if the snapshot was successfully created,
and a nonzero error code otherwise.
.Pp
Note: Taking a snapshot will fail on any pool older than legacy version 27.
To enable taking snapshots from ZCP scripts, the pool must be upgraded.
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar dataset Pq string
Name of snapshot to create.
.El
+.It Fn zfs.sync.rename_snapshot dataset oldsnapname newsnapname
+Rename a snapshot of a filesystem or a volume.
+Returns 0 if the snapshot was successfully renamed,
+and a nonzero error code otherwise.
+.Pp
+.Bl -tag -compact -width "newbookmark (string)"
+.It Ar dataset Pq string
+Name of the snapshot's parent dataset.
+.It Ar oldsnapname Pq string
+Original name of the snapshot.
+.It Ar newsnapname Pq string
+New name of the snapshot.
+.El
.It Fn zfs.sync.bookmark source newbookmark
Create a bookmark of an existing source snapshot or bookmark.
Returns 0 if the new bookmark was successfully created,
and a nonzero error code otherwise.
.Pp
Note: Bookmarking requires the corresponding pool feature to be enabled.
.Pp
.Bl -tag -compact -width "newbookmark (string)"
.It Ar source Pq string
Full name of the existing snapshot or bookmark.
.It Ar newbookmark Pq string
Full name of the new bookmark.
.El
.El
.It Sy zfs.check submodule
For each function in the
.Sy zfs.sync
submodule, there is a corresponding
.Sy zfs.check
function which performs a "dry run" of the same operation.
Each takes the same arguments as its
.Sy zfs.sync
counterpart and returns 0 if the operation would succeed,
or a non-zero error code if it would fail, along with any other error details.
That is, each has the same behavior as the corresponding sync function except
for actually executing the requested change.
For example,
.Fn zfs.check.destroy \&"fs"
returns 0 if
.Fn zfs.sync.destroy \&"fs"
would successfully destroy the dataset.
.Pp
The available
.Sy zfs.check
functions are:
.Bl -tag -compact -width "xx"
.It Sy zfs.check.destroy Ns Pq Ar dataset , Op Ar defer Ns = Ns Sy true Ns | Ns Sy false
.It Fn zfs.check.promote dataset
.It Fn zfs.check.rollback filesystem
.It Fn zfs.check.set_property dataset property value
.It Fn zfs.check.snapshot dataset
.El
.It Sy zfs.list submodule
The zfs.list submodule provides functions for iterating over datasets and
properties.
Rather than returning tables, these functions act as Lua iterators, and are
generally used as follows:
.Bd -literal -compact -offset indent
.No for child in Fn zfs.list.children \&"rpool" No do
...
end
.Ed
.Pp
The available
.Sy zfs.list
functions are:
.Bl -tag -width "xx"
.It Fn zfs.list.clones snapshot
Iterate through all clones of the given snapshot.
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar snapshot Pq string
Must be a valid snapshot path in the current pool.
.El
.It Fn zfs.list.snapshots dataset
Iterate through all snapshots of the given dataset.
Each snapshot is returned as a string containing the full dataset name,
e.g. "pool/fs@snap".
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar dataset Pq string
Must be a valid filesystem or volume.
.El
.It Fn zfs.list.children dataset
Iterate through all direct children of the given dataset.
Each child is returned as a string containing the full dataset name,
e.g. "pool/fs/child".
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar dataset Pq string
Must be a valid filesystem or volume.
.El
.It Fn zfs.list.bookmarks dataset
Iterate through all bookmarks of the given dataset.
Each bookmark is returned as a string containing the full dataset name,
e.g. "pool/fs#bookmark".
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar dataset Pq string
Must be a valid filesystem or volume.
.El
.It Fn zfs.list.holds snapshot
Iterate through all user holds on the given snapshot.
Each hold is returned
as a pair of the hold's tag and the timestamp (in seconds since the epoch) at
which it was created.
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar snapshot Pq string
Must be a valid snapshot.
.El
.It Fn zfs.list.properties dataset
An alias for zfs.list.user_properties (see relevant entry).
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar dataset Pq string
Must be a valid filesystem, snapshot, or volume.
.El
.It Fn zfs.list.user_properties dataset
Iterate through all user properties for the given dataset.
For each step of the iteration, output the property name, its value,
and its source.
Throws a Lua error if the dataset is invalid.
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar dataset Pq string
Must be a valid filesystem, snapshot, or volume.
.El
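.Pp
A sketch of iterating over user properties, assuming each step of the iterator
yields the three values described above (the dataset name is illustrative):
.Bd -literal -compact -offset indent
for prop, value, source in zfs.list.user_properties("rpool/data") do
    zfs.debug(prop .. "=" .. value .. " (from " .. source .. ")")
end
.Ed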
.It Fn zfs.list.system_properties dataset
Returns an array of strings, the names of the valid system (non-user defined)
properties for the given dataset.
Throws a Lua error if the dataset is invalid.
.Pp
.Bl -tag -compact -width "snapshot (string)"
.It Ar dataset Pq string
Must be a valid filesystem, snapshot or volume.
.El
.El
.El
.
.Sh EXAMPLES
.
.Ss Example 1
The following channel program recursively destroys a filesystem and all its
snapshots and children in a naive manner.
Note that this does not involve any error handling or reporting.
.Bd -literal -offset indent
function destroy_recursive(root)
for child in zfs.list.children(root) do
destroy_recursive(child)
end
for snap in zfs.list.snapshots(root) do
zfs.sync.destroy(snap)
end
zfs.sync.destroy(root)
end
destroy_recursive("pool/somefs")
.Ed
.
.Ss Example 2
A more verbose and robust version of the same channel program, which
properly detects and reports errors, and also takes the dataset to destroy
as a command line argument, would be as follows:
.Bd -literal -offset indent
succeeded = {}
failed = {}
function destroy_recursive(root)
for child in zfs.list.children(root) do
destroy_recursive(child)
end
for snap in zfs.list.snapshots(root) do
err = zfs.sync.destroy(snap)
if (err ~= 0) then
failed[snap] = err
else
succeeded[snap] = err
end
end
err = zfs.sync.destroy(root)
if (err ~= 0) then
failed[root] = err
else
succeeded[root] = err
end
end
args = ...
argv = args["argv"]
destroy_recursive(argv[1])
results = {}
results["succeeded"] = succeeded
results["failed"] = failed
return results
.Ed
.
.Ss Example 3
The following function performs a forced promote operation by attempting to
promote the given clone and destroying any conflicting snapshots.
.Bd -literal -offset indent
function force_promote(ds)
errno, details = zfs.check.promote(ds)
if (errno == EEXIST) then
assert(details ~= Nil)
for i, snap in ipairs(details) do
zfs.sync.destroy(ds .. "@" .. snap)
end
elseif (errno ~= 0) then
return errno
end
return zfs.sync.promote(ds)
end
.Ed
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index 4803952cbfed..7a20e6ee4615 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -1,474 +1,473 @@
# When integrated in to a monolithic kernel the spl module must appear
# first. This ensures its module initialization function is run before
# any of the other module initialization functions which depend on it.
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
icp_include = @abs_srcdir@/icp/include
zstd_include = @abs_srcdir@/zstd/include
ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h
ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include
src = @abs_srcdir@
obj = @abs_builddir@
else
zfs_include = $(srctree)/include/zfs
icp_include = $(srctree)/$(src)/icp/include
zstd_include = $(srctree)/$(src)/zstd/include
ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h
endif
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs
ZFS_MODULE_CFLAGS += -I$(zfs_include)
ZFS_MODULE_CPPFLAGS += -D_KERNEL
ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@
# KASAN enables -Werror=frame-larger-than=1024, which
# breaks oh so many parts of our build.
ifeq ($(CONFIG_KASAN),y)
ZFS_MODULE_CFLAGS += -Wno-error=frame-larger-than=
endif
ifneq ($(KBUILD_EXTMOD),)
@CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include
@CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@
endif
asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
# Suppress unused-value warnings in sparc64 architecture headers
ccflags-$(CONFIG_SPARC64) += -Wno-unused-value
obj-$(CONFIG_ZFS) := spl.o zfs.o
SPL_OBJS := \
spl-atomic.o \
spl-condvar.o \
spl-cred.o \
spl-err.o \
spl-generic.o \
spl-kmem-cache.o \
spl-kmem.o \
spl-kstat.o \
spl-proc.o \
spl-procfs-list.o \
spl-taskq.o \
spl-thread.o \
spl-trace.o \
spl-tsd.o \
spl-vmem.o \
spl-xdr.o \
spl-zlib.o \
spl-zone.o
spl-objs += $(addprefix os/linux/spl/,$(SPL_OBJS))
zfs-objs += avl/avl.o
ICP_OBJS := \
algs/aes/aes_impl.o \
algs/aes/aes_impl_generic.o \
algs/aes/aes_modes.o \
algs/blake3/blake3.o \
algs/blake3/blake3_generic.o \
algs/blake3/blake3_impl.o \
algs/blake3/blake3_x86-64.o \
algs/edonr/edonr.o \
algs/modes/cbc.o \
algs/modes/ccm.o \
algs/modes/ctr.o \
algs/modes/ecb.o \
algs/modes/gcm.o \
algs/modes/gcm_generic.o \
algs/modes/modes.o \
algs/sha2/sha2.o \
algs/skein/skein.o \
algs/skein/skein_block.o \
algs/skein/skein_iv.o \
api/kcf_cipher.o \
api/kcf_ctxops.o \
api/kcf_mac.o \
core/kcf_callprov.o \
core/kcf_mech_tabs.o \
core/kcf_prov_lib.o \
core/kcf_prov_tabs.o \
core/kcf_sched.o \
illumos-crypto.o \
io/aes.o \
io/sha2_mod.o \
io/skein_mod.o \
spi/kcf_spi.o
ICP_OBJS_X86_64 := \
asm-x86_64/aes/aes_aesni.o \
asm-x86_64/aes/aes_amd64.o \
asm-x86_64/aes/aeskey.o \
asm-x86_64/blake3/blake3_avx2.o \
asm-x86_64/blake3/blake3_avx512.o \
asm-x86_64/blake3/blake3_sse2.o \
asm-x86_64/blake3/blake3_sse41.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o \
asm-x86_64/sha2/sha256_impl.o \
asm-x86_64/sha2/sha512_impl.o
ICP_OBJS_X86 := \
algs/aes/aes_impl_aesni.o \
algs/aes/aes_impl_x86-64.o \
algs/modes/gcm_pclmulqdq.o
ICP_OBJS_ARM64 := \
asm-aarch64/blake3/b3_aarch64_sse2.o \
asm-aarch64/blake3/b3_aarch64_sse41.o
ICP_OBJS_PPC_PPC64 := \
asm-ppc64/blake3/b3_ppc64le_sse2.o \
asm-ppc64/blake3/b3_ppc64le_sse41.o
zfs-objs += $(addprefix icp/,$(ICP_OBJS))
zfs-$(CONFIG_X86) += $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_X86_64) += $(addprefix icp/,$(ICP_OBJS_X86_64))
zfs-$(CONFIG_ARM64) += $(addprefix icp/,$(ICP_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : asflags-y += -I$(icp_include)
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include)
# Suppress objtool "can't find jump dest instruction at" warnings. They
# are caused by the constants which are defined in the text section of the
# assembly file using .byte instructions (e.g. bswap_mask). The objtool
# utility tries to interpret them as opcodes and obviously fails doing so.
OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y
OBJECT_FILES_NON_STANDARD_ghash-x86_64.o := y
# Suppress objtool "unsupported stack pointer realignment" warnings. We are
# not using a DRAP register while aligning the stack to a 64 byte boundary.
# See #6950 for the reasoning.
OBJECT_FILES_NON_STANDARD_sha256_impl.o := y
OBJECT_FILES_NON_STANDARD_sha512_impl.o := y
LUA_OBJS := \
lapi.o \
lauxlib.o \
lbaselib.o \
lcode.o \
lcompat.o \
lcorolib.o \
lctype.o \
ldebug.o \
ldo.o \
lfunc.o \
lgc.o \
llex.o \
lmem.o \
lobject.o \
lopcodes.o \
lparser.o \
lstate.o \
lstring.o \
lstrlib.o \
ltable.o \
ltablib.o \
ltm.o \
lvm.o \
lzio.o \
setjmp/setjmp.o
zfs-objs += $(addprefix lua/,$(LUA_OBJS))
NVPAIR_OBJS := \
fnvpair.o \
nvpair.o \
nvpair_alloc_fixed.o \
nvpair_alloc_spl.o
zfs-objs += $(addprefix nvpair/,$(NVPAIR_OBJS))
UNICODE_OBJS := \
u8_textprep.o \
uconv.o
zfs-objs += $(addprefix unicode/,$(UNICODE_OBJS))
ZCOMMON_OBJS := \
cityhash.o \
zfeature_common.o \
zfs_comutil.o \
zfs_deleg.o \
zfs_fletcher.o \
zfs_fletcher_superscalar.o \
zfs_fletcher_superscalar4.o \
zfs_namecheck.o \
zfs_prop.o \
zpool_prop.o \
zprop_common.o
ZCOMMON_OBJS_X86 := \
zfs_fletcher_avx512.o \
zfs_fletcher_intel.o \
zfs_fletcher_sse.o
ZCOMMON_OBJS_ARM64 := \
zfs_fletcher_aarch64_neon.o
zfs-objs += $(addprefix zcommon/,$(ZCOMMON_OBJS))
zfs-$(CONFIG_X86) += $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zcommon/,$(ZCOMMON_OBJS_ARM64))
# Zstd uses -O3 by default, so we should follow
ZFS_ZSTD_FLAGS := -O3
# -fno-tree-vectorize gets set for gcc in zstd/common/compiler.h
# Set it for other compilers, too.
ZFS_ZSTD_FLAGS += -fno-tree-vectorize
# SSE register return with SSE disabled if -march=znverX is passed
ZFS_ZSTD_FLAGS += -U__BMI__
# Quiet warnings about frame size due to unused code in unmodified zstd lib
ZFS_ZSTD_FLAGS += -Wframe-larger-than=20480
ZSTD_OBJS := \
zfs_zstd.o \
zstd_sparc.o
ZSTD_UPSTREAM_OBJS := \
lib/common/entropy_common.o \
lib/common/error_private.o \
lib/common/fse_decompress.o \
lib/common/pool.o \
lib/common/zstd_common.o \
lib/compress/fse_compress.o \
lib/compress/hist.o \
lib/compress/huf_compress.o \
lib/compress/zstd_compress.o \
lib/compress/zstd_compress_literals.o \
lib/compress/zstd_compress_sequences.o \
lib/compress/zstd_compress_superblock.o \
lib/compress/zstd_double_fast.o \
lib/compress/zstd_fast.o \
lib/compress/zstd_lazy.o \
lib/compress/zstd_ldm.o \
lib/compress/zstd_opt.o \
lib/decompress/huf_decompress.o \
lib/decompress/zstd_ddict.o \
lib/decompress/zstd_decompress.o \
lib/decompress/zstd_decompress_block.o
zfs-objs += $(addprefix zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS))
# Disable aarch64 neon SIMD instructions for kernel mode
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -I$(zstd_include) $(ZFS_ZSTD_FLAGS)
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : asflags-y += -I$(zstd_include)
$(addprefix $(obj)/zstd/,$(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -include $(zstd_include)/aarch64_compat.h -include $(zstd_include)/zstd_compat_wrapper.h -Wp,-w
$(obj)/zstd/zfs_zstd.o : ccflags-y += -include $(zstd_include)/zstd_compat_wrapper.h
ZFS_OBJS := \
abd.o \
aggsum.o \
arc.o \
blake3_zfs.o \
blkptr.o \
bplist.o \
bpobj.o \
bptree.o \
bqueue.o \
btree.o \
dataset_kstats.o \
dbuf.o \
dbuf_stats.o \
ddt.o \
ddt_zap.o \
dmu.o \
dmu_diff.o \
dmu_object.o \
dmu_objset.o \
dmu_recv.o \
dmu_redact.o \
dmu_send.o \
dmu_traverse.o \
dmu_tx.o \
dmu_zfetch.o \
dnode.o \
dnode_sync.o \
dsl_bookmark.o \
dsl_crypt.o \
dsl_dataset.o \
dsl_deadlist.o \
dsl_deleg.o \
dsl_destroy.o \
dsl_dir.o \
dsl_pool.o \
dsl_prop.o \
dsl_scan.o \
dsl_synctask.o \
dsl_userhold.o \
edonr_zfs.o \
fm.o \
gzip.o \
hkdf.o \
lz4.o \
lz4_zfs.o \
lzjb.o \
metaslab.o \
mmp.o \
multilist.o \
objlist.o \
pathname.o \
range_tree.o \
refcount.o \
rrwlock.o \
sa.o \
sha256.o \
skein_zfs.o \
spa.o \
- spa_boot.o \
spa_checkpoint.o \
spa_config.o \
spa_errlog.o \
spa_history.o \
spa_log_spacemap.o \
spa_misc.o \
spa_stats.o \
space_map.o \
space_reftree.o \
txg.o \
uberblock.o \
unique.o \
vdev.o \
vdev_cache.o \
vdev_draid.o \
vdev_draid_rand.o \
vdev_indirect.o \
vdev_indirect_births.o \
vdev_indirect_mapping.o \
vdev_initialize.o \
vdev_label.o \
vdev_mirror.o \
vdev_missing.o \
vdev_queue.o \
vdev_raidz.o \
vdev_raidz_math.o \
vdev_raidz_math_scalar.o \
vdev_rebuild.o \
vdev_removal.o \
vdev_root.o \
vdev_trim.o \
zap.o \
zap_leaf.o \
zap_micro.o \
zcp.o \
zcp_get.o \
zcp_global.o \
zcp_iter.o \
zcp_set.o \
zcp_synctask.o \
zfeature.o \
zfs_byteswap.o \
zfs_chksum.o \
zfs_fm.o \
zfs_fuid.o \
zfs_ioctl.o \
zfs_log.o \
zfs_onexit.o \
zfs_quota.o \
zfs_ratelimit.o \
zfs_replay.o \
zfs_rlock.o \
zfs_sa.o \
zfs_vnops.o \
zil.o \
zio.o \
zio_checksum.o \
zio_compress.o \
zio_inject.o \
zle.o \
zrlock.o \
zthr.o \
zvol.o
ZFS_OBJS_OS := \
abd_os.o \
arc_os.o \
mmp_os.o \
policy.o \
qat.o \
qat_compress.o \
qat_crypt.o \
spa_misc_os.o \
trace.o \
vdev_disk.o \
vdev_file.o \
zfs_acl.o \
zfs_ctldir.o \
zfs_debug.o \
zfs_dir.o \
zfs_file_os.o \
zfs_ioctl_os.o \
zfs_racct.o \
zfs_sysfs.o \
zfs_uio.o \
zfs_vfsops.o \
zfs_vnops_os.o \
zfs_znode.o \
zio_crypt.o \
zpl_ctldir.o \
zpl_export.o \
zpl_file.o \
zpl_inode.o \
zpl_super.o \
zpl_xattr.o \
zvol_os.o
ZFS_OBJS_X86 := \
vdev_raidz_math_avx2.o \
vdev_raidz_math_avx512bw.o \
vdev_raidz_math_avx512f.o \
vdev_raidz_math_sse2.o \
vdev_raidz_math_ssse3.o
ZFS_OBJS_ARM64 := \
vdev_raidz_math_aarch64_neon.o \
vdev_raidz_math_aarch64_neonx2.o
ZFS_OBJS_PPC_PPC64 := \
vdev_raidz_math_powerpc_altivec.o
zfs-objs += $(addprefix zfs/,$(ZFS_OBJS)) $(addprefix os/linux/zfs/,$(ZFS_OBJS_OS))
zfs-$(CONFIG_X86) += $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zfs/,$(ZFS_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y
ifeq ($(CONFIG_ALTIVEC),y)
$(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec
endif
diff --git a/sys/contrib/openzfs/module/Makefile.bsd b/sys/contrib/openzfs/module/Makefile.bsd
index 589ca60b29be..8829ad942130 100644
--- a/sys/contrib/openzfs/module/Makefile.bsd
+++ b/sys/contrib/openzfs/module/Makefile.bsd
@@ -1,447 +1,447 @@
.if !defined(WITH_CTF)
WITH_CTF=1
.endif
.include <bsd.sys.mk>
SRCDIR=${.CURDIR}
INCDIR=${.CURDIR:H}/include
KMOD= openzfs
.PATH: ${SRCDIR}/avl \
${SRCDIR}/icp/algs/blake3 \
${SRCDIR}/icp/asm-aarch64/blake3 \
${SRCDIR}/icp/asm-ppc64/blake3 \
${SRCDIR}/icp/asm-x86_64/blake3 \
${SRCDIR}/lua \
${SRCDIR}/nvpair \
${SRCDIR}/icp/algs/edonr \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
${SRCDIR}/unicode \
${SRCDIR}/zcommon \
${SRCDIR}/zfs \
${SRCDIR}/zstd \
${SRCDIR}/zstd/lib/common \
${SRCDIR}/zstd/lib/compress \
${SRCDIR}/zstd/lib/decompress
CFLAGS+= -I${.OBJDIR:H}/include
CFLAGS+= -I${INCDIR}
CFLAGS+= -I${INCDIR}/os/freebsd
CFLAGS+= -I${INCDIR}/os/freebsd/spl
CFLAGS+= -I${INCDIR}/os/freebsd/zfs
CFLAGS+= -I${SRCDIR}/zstd/include
CFLAGS+= -I${SRCDIR}/icp/include
CFLAGS+= -include ${INCDIR}/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS -D__BSD_VISIBLE=1 \
-DHAVE_UIO_ZEROCOPY -DWITHOUT_NETDUMP -D__KERNEL -D_SYS_CONDVAR_H_ \
-D_SYS_VMEM_H_ -DKDTRACE_HOOKS -DSMP -DCOMPAT_FREEBSD11
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
-DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL
.endif
.if defined(WITH_DEBUG) && ${WITH_DEBUG} == "true"
CFLAGS+= -DZFS_DEBUG -g
.if defined(WITH_INVARIANTS) && ${WITH_INVARIANTS} == "true"
CFLAGS+= -DINVARIANTS -DWITNESS -DOPENSOLARIS_WITNESS
.endif
.if defined(WITH_O0) && ${WITH_O0} == "true"
CFLAGS+= -O0
.endif
.else
CFLAGS += -DNDEBUG
.endif
.if defined(WITH_VFS_DEBUG) && ${WITH_VFS_DEBUG} == "true"
# kernel must also be built with this option for this to work
CFLAGS+= -DDEBUG_VFS_LOCKS
.endif
.if defined(WITH_GCOV) && ${WITH_GCOV} == "true"
CFLAGS+= -fprofile-arcs -ftest-coverage
.endif
DEBUG_FLAGS=-g
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "arm"
CFLAGS+= -DBITS_PER_LONG=32
.else
CFLAGS+= -DBITS_PER_LONG=64
.endif
SRCS= vnode_if.h device_if.h bus_if.h
#avl
SRCS+= avl.c
# icp
SRCS+= edonr.c
#icp/algs/blake3
SRCS+= blake3.c \
blake3_generic.c \
blake3_impl.c \
blake3_x86-64.c
#icp/asm-aarch64/blake3
SRCS+= b3_aarch64_sse2.S \
b3_aarch64_sse41.S
#icp/asm-ppc64/blake3
SRCS+= b3_ppc64le_sse2.S \
b3_ppc64le_sse41.S
#icp/asm-x86_64/blake3
SRCS+= blake3_avx2.S \
blake3_avx512.S \
blake3_sse2.S \
blake3_sse41.S
#lua
SRCS+= lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
#nvpair
SRCS+= nvpair.c \
fnvpair.c \
nvpair_alloc_spl.c \
nvpair_alloc_fixed.c
#os/freebsd/spl
SRCS+= acl_common.c \
callb.c \
list.c \
sha256c.c \
sha512c.c \
spl_acl.c \
spl_cmn_err.c \
spl_dtrace.c \
spl_kmem.c \
spl_kstat.c \
spl_misc.c \
spl_policy.c \
spl_procfs_list.c \
spl_string.c \
spl_sunddi.c \
spl_sysevent.c \
spl_taskq.c \
spl_uio.c \
spl_vfs.c \
spl_vm.c \
spl_zlib.c \
spl_zone.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "arm"
SRCS+= spl_atomic.c
.endif
#os/freebsd/zfs
SRCS+= abd_os.c \
arc_os.c \
crypto_os.c \
dmu_os.c \
+ event_os.c \
hkdf.c \
kmod_core.c \
spa_os.c \
sysctl_os.c \
vdev_file.c \
vdev_geom.c \
vdev_label_os.c \
zfs_acl.c \
zfs_ctldir.c \
zfs_debug.c \
zfs_dir.c \
zfs_ioctl_compat.c \
zfs_ioctl_os.c \
zfs_racct.c \
zfs_vfsops.c \
zfs_vnops_os.c \
zfs_znode.c \
zio_crypt.c \
zvol_os.c
#unicode
SRCS+= uconv.c \
u8_textprep.c
#zcommon
SRCS+= zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
#zfs
SRCS+= abd.c \
aggsum.c \
arc.c \
blake3_zfs.c \
blkptr.c \
bplist.c \
bpobj.c \
btree.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
bptree.c \
bqueue.c \
dataset_kstats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_bookmark.c \
dsl_dir.c \
dsl_crypt.c \
dsl_destroy.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_userhold.c \
edonr_zfs.c \
fm.c \
gzip.c \
lzjb.c \
lz4.c \
lz4_zfs.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha256.c \
skein_zfs.c \
spa.c \
- spa_boot.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
uberblock.c \
unique.c \
vdev.c \
vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_indirect.c \
vdev_indirect_births.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_chksum.c \
zfs_file_os.c \
zfs_fm.c \
zfs_fuid.c \
zfs_ioctl.c \
zfs_log.c \
zfs_onexit.c \
zfs_quota.c \
zfs_ratelimit.c \
zfs_replay.c \
zfs_rlock.c \
zfs_sa.c \
zfs_vnops.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_inject.c \
zle.c \
zrlock.c \
zthr.c \
zvol.c
#zstd
SRCS+= zfs_zstd.c \
entropy_common.c \
error_private.c \
fse_decompress.c \
pool.c \
zstd_common.c \
fse_compress.c \
hist.c \
huf_compress.c \
zstd_compress.c \
zstd_compress_literals.c \
zstd_compress_sequences.c \
zstd_compress_superblock.c \
zstd_double_fast.c \
zstd_fast.c \
zstd_lazy.c \
zstd_ldm.c \
zstd_opt.c \
huf_decompress.c \
zstd_ddict.c \
zstd_decompress.c \
zstd_decompress_block.c
beforeinstall:
.if ${MK_DEBUG_FILES} != "no"
mtree -eu \
-f /etc/mtree/BSD.debug.dist \
-p ${DESTDIR}/usr/lib
.endif
.include <bsd.kmod.mk>
CFLAGS.gcc+= -Wno-pointer-to-int-cast
CFLAGS.lapi.c= -Wno-cast-qual
CFLAGS.lcompat.c= -Wno-cast-qual
CFLAGS.lobject.c= -Wno-cast-qual
CFLAGS.ltable.c= -Wno-cast-qual
CFLAGS.lvm.c= -Wno-cast-qual
CFLAGS.nvpair.c= -DHAVE_RPC_TYPES -Wno-cast-qual
CFLAGS.spl_string.c= -Wno-cast-qual
CFLAGS.spl_vm.c= -Wno-cast-qual
CFLAGS.spl_zlib.c= -Wno-cast-qual
CFLAGS.abd.c= -Wno-cast-qual
CFLAGS.zfs_log.c= -Wno-cast-qual
CFLAGS.zfs_vnops_os.c= -Wno-pointer-arith
CFLAGS.u8_textprep.c= -Wno-cast-qual
CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_sse.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_avx512.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zprop_common.c= -Wno-cast-qual
CFLAGS.ddt.c= -Wno-cast-qual
CFLAGS.dmu.c= -Wno-cast-qual
CFLAGS.dmu_traverse.c= -Wno-cast-qual
CFLAGS.dsl_dir.c= -Wno-cast-qual
CFLAGS.dsl_deadlist.c= -Wno-cast-qual
CFLAGS.dsl_prop.c= -Wno-cast-qual
CFLAGS.edonr.c=-Wno-cast-qual
CFLAGS.fm.c= -Wno-cast-qual
CFLAGS.lz4_zfs.c= -Wno-cast-qual
CFLAGS.spa.c= -Wno-cast-qual
CFLAGS.spa_misc.c= -Wno-cast-qual
CFLAGS.sysctl_os.c= -include ../zfs_config.h
CFLAGS.vdev_draid.c= -Wno-cast-qual
CFLAGS.vdev_raidz.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_scalar.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_avx2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_avx512f.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_sse2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.zap_leaf.c= -Wno-cast-qual
CFLAGS.zap_micro.c= -Wno-cast-qual
CFLAGS.zcp.c= -Wno-cast-qual
CFLAGS.zfs_fm.c= -Wno-cast-qual
CFLAGS.zfs_ioctl.c= -Wno-cast-qual
CFLAGS.zil.c= -Wno-cast-qual
CFLAGS.zio.c= -Wno-cast-qual
CFLAGS.zrlock.c= -Wno-cast-qual
CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.entropy_common.c= -fno-tree-vectorize -U__BMI__
CFLAGS.error_private.c= -fno-tree-vectorize -U__BMI__
CFLAGS.fse_decompress.c= -fno-tree-vectorize -U__BMI__
CFLAGS.pool.c= -fno-tree-vectorize -U__BMI__
CFLAGS.xxhash.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_common.c= -fno-tree-vectorize -U__BMI__
CFLAGS.fse_compress.c= -fno-tree-vectorize -U__BMI__
CFLAGS.hist.c= -fno-tree-vectorize -U__BMI__
CFLAGS.huf_compress.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_compress.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_compress_literals.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_compress_sequences.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_compress_superblock.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_double_fast.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_fast.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_lazy.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_ldm.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_opt.c= -fno-tree-vectorize -U__BMI__
CFLAGS.huf_decompress.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_ddict.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_decompress.c= -fno-tree-vectorize -U__BMI__
CFLAGS.zstd_decompress_block.c= -fno-tree-vectorize -U__BMI__
diff --git a/sys/contrib/openzfs/module/icp/algs/blake3/blake3.c b/sys/contrib/openzfs/module/icp/algs/blake3/blake3.c
index b9600207b673..5f7018598820 100644
--- a/sys/contrib/openzfs/module/icp/algs/blake3/blake3.c
+++ b/sys/contrib/openzfs/module/icp/algs/blake3/blake3.c
@@ -1,732 +1,732 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
* Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
* Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#include <sys/zfs_context.h>
#include <sys/blake3.h>
#include "blake3_impl.h"
/*
* We need 1056 byte stack for blake3_compress_subtree_wide()
* - we define this pragma to make gcc happy
*/
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wframe-larger-than="
#endif
/* internal used */
typedef struct {
uint32_t input_cv[8];
uint64_t counter;
uint8_t block[BLAKE3_BLOCK_LEN];
uint8_t block_len;
uint8_t flags;
} output_t;
/* internal flags */
enum blake3_flags {
CHUNK_START = 1 << 0,
CHUNK_END = 1 << 1,
PARENT = 1 << 2,
ROOT = 1 << 3,
KEYED_HASH = 1 << 4,
DERIVE_KEY_CONTEXT = 1 << 5,
DERIVE_KEY_MATERIAL = 1 << 6,
};
/* internal start */
static void chunk_state_init(blake3_chunk_state_t *ctx,
const uint32_t key[8], uint8_t flags)
{
memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
ctx->chunk_counter = 0;
memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
ctx->buf_len = 0;
ctx->blocks_compressed = 0;
ctx->flags = flags;
}
static void chunk_state_reset(blake3_chunk_state_t *ctx,
const uint32_t key[8], uint64_t chunk_counter)
{
memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
ctx->chunk_counter = chunk_counter;
ctx->blocks_compressed = 0;
memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
ctx->buf_len = 0;
}
static size_t chunk_state_len(const blake3_chunk_state_t *ctx)
{
return (BLAKE3_BLOCK_LEN * (size_t)ctx->blocks_compressed) +
((size_t)ctx->buf_len);
}
static size_t chunk_state_fill_buf(blake3_chunk_state_t *ctx,
const uint8_t *input, size_t input_len)
{
size_t take = BLAKE3_BLOCK_LEN - ((size_t)ctx->buf_len);
if (take > input_len) {
take = input_len;
}
uint8_t *dest = ctx->buf + ((size_t)ctx->buf_len);
memcpy(dest, input, take);
ctx->buf_len += (uint8_t)take;
return (take);
}
static uint8_t chunk_state_maybe_start_flag(const blake3_chunk_state_t *ctx)
{
if (ctx->blocks_compressed == 0) {
return (CHUNK_START);
} else {
return (0);
}
}
static output_t make_output(const uint32_t input_cv[8],
const uint8_t *block, uint8_t block_len,
uint64_t counter, uint8_t flags)
{
output_t ret;
memcpy(ret.input_cv, input_cv, 32);
memcpy(ret.block, block, BLAKE3_BLOCK_LEN);
ret.block_len = block_len;
ret.counter = counter;
ret.flags = flags;
return (ret);
}
/*
* Chaining values within a given chunk (specifically the compress_in_place
* interface) are represented as words. This avoids unnecessary bytes<->words
* conversion overhead in the portable implementation. However, the hash_many
* interface handles both user input and parent node blocks, so it accepts
* bytes. For that reason, chaining values in the CV stack are represented as
* bytes.
*/
-static void output_chaining_value(const blake3_impl_ops_t *ops,
+static void output_chaining_value(const blake3_ops_t *ops,
const output_t *ctx, uint8_t cv[32])
{
uint32_t cv_words[8];
memcpy(cv_words, ctx->input_cv, 32);
ops->compress_in_place(cv_words, ctx->block, ctx->block_len,
ctx->counter, ctx->flags);
store_cv_words(cv, cv_words);
}
-static void output_root_bytes(const blake3_impl_ops_t *ops, const output_t *ctx,
+static void output_root_bytes(const blake3_ops_t *ops, const output_t *ctx,
uint64_t seek, uint8_t *out, size_t out_len)
{
uint64_t output_block_counter = seek / 64;
size_t offset_within_block = seek % 64;
uint8_t wide_buf[64];
while (out_len > 0) {
ops->compress_xof(ctx->input_cv, ctx->block, ctx->block_len,
output_block_counter, ctx->flags | ROOT, wide_buf);
size_t available_bytes = 64 - offset_within_block;
size_t memcpy_len;
if (out_len > available_bytes) {
memcpy_len = available_bytes;
} else {
memcpy_len = out_len;
}
memcpy(out, wide_buf + offset_within_block, memcpy_len);
out += memcpy_len;
out_len -= memcpy_len;
output_block_counter += 1;
offset_within_block = 0;
}
}
-static void chunk_state_update(const blake3_impl_ops_t *ops,
+static void chunk_state_update(const blake3_ops_t *ops,
blake3_chunk_state_t *ctx, const uint8_t *input, size_t input_len)
{
if (ctx->buf_len > 0) {
size_t take = chunk_state_fill_buf(ctx, input, input_len);
input += take;
input_len -= take;
if (input_len > 0) {
ops->compress_in_place(ctx->cv, ctx->buf,
BLAKE3_BLOCK_LEN, ctx->chunk_counter,
ctx->flags|chunk_state_maybe_start_flag(ctx));
ctx->blocks_compressed += 1;
ctx->buf_len = 0;
memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
}
}
while (input_len > BLAKE3_BLOCK_LEN) {
ops->compress_in_place(ctx->cv, input, BLAKE3_BLOCK_LEN,
ctx->chunk_counter,
ctx->flags|chunk_state_maybe_start_flag(ctx));
ctx->blocks_compressed += 1;
input += BLAKE3_BLOCK_LEN;
input_len -= BLAKE3_BLOCK_LEN;
}
size_t take = chunk_state_fill_buf(ctx, input, input_len);
input += take;
input_len -= take;
}
static output_t chunk_state_output(const blake3_chunk_state_t *ctx)
{
uint8_t block_flags =
ctx->flags | chunk_state_maybe_start_flag(ctx) | CHUNK_END;
return (make_output(ctx->cv, ctx->buf, ctx->buf_len, ctx->chunk_counter,
block_flags));
}
static output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
const uint32_t key[8], uint8_t flags)
{
return (make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT));
}
/*
* Given some input larger than one chunk, return the number of bytes that
* should go in the left subtree. This is the largest power-of-2 number of
* chunks that leaves at least 1 byte for the right subtree.
*/
static size_t left_len(size_t content_len)
{
/*
* Subtract 1 to reserve at least one byte for the right side.
* content_len should always be greater than BLAKE3_CHUNK_LEN.
*/
size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
return (round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN);
}
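/*
* Illustrative arithmetic (editorial note, not upstream text), assuming the
* 1024-byte BLAKE3_CHUNK_LEN: for content_len of 5 chunks (5120 bytes),
* full_chunks is (5120 - 1) / 1024 = 4, so the left subtree gets 4096 bytes
* and the right gets the remaining chunk. For exactly 4 chunks (4096 bytes),
* the "- 1" makes full_chunks 3, which rounds down to 2, splitting 2048/2048
* and leaving bytes for the right side as required.
*/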
/*
* Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
* on a single thread. Write out the chunk chaining values and return the
* number of chunks hashed. These chunks are never the root and never empty;
* those cases use a different codepath.
*/
-static size_t compress_chunks_parallel(const blake3_impl_ops_t *ops,
+static size_t compress_chunks_parallel(const blake3_ops_t *ops,
const uint8_t *input, size_t input_len, const uint32_t key[8],
uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
const uint8_t *chunks_array[MAX_SIMD_DEGREE];
size_t input_position = 0;
size_t chunks_array_len = 0;
while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
chunks_array[chunks_array_len] = &input[input_position];
input_position += BLAKE3_CHUNK_LEN;
chunks_array_len += 1;
}
ops->hash_many(chunks_array, chunks_array_len, BLAKE3_CHUNK_LEN /
BLAKE3_BLOCK_LEN, key, chunk_counter, B_TRUE, flags, CHUNK_START,
CHUNK_END, out);
/*
* Hash the remaining partial chunk, if there is one. Note that the
* empty chunk (meaning the empty message) is a different codepath.
*/
if (input_len > input_position) {
uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
blake3_chunk_state_t chunk_state;
chunk_state_init(&chunk_state, key, flags);
chunk_state.chunk_counter = counter;
chunk_state_update(ops, &chunk_state, &input[input_position],
input_len - input_position);
output_t output = chunk_state_output(&chunk_state);
output_chaining_value(ops, &output, &out[chunks_array_len *
BLAKE3_OUT_LEN]);
return (chunks_array_len + 1);
} else {
return (chunks_array_len);
}
}
/*
* Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
* on a single thread. Write out the parent chaining values and return the
* number of parents hashed. (If there's an odd input chaining value left over,
* return it as an additional output.) These parents are never the root and
* never empty; those cases use a different codepath.
*/
-static size_t compress_parents_parallel(const blake3_impl_ops_t *ops,
+static size_t compress_parents_parallel(const blake3_ops_t *ops,
const uint8_t *child_chaining_values, size_t num_chaining_values,
const uint32_t key[8], uint8_t flags, uint8_t *out)
{
const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2];
size_t parents_array_len = 0;
while (num_chaining_values - (2 * parents_array_len) >= 2) {
parents_array[parents_array_len] = &child_chaining_values[2 *
parents_array_len * BLAKE3_OUT_LEN];
parents_array_len += 1;
}
ops->hash_many(parents_array, parents_array_len, 1, key, 0, B_FALSE,
flags | PARENT, 0, 0, out);
/* If there's an odd child left over, it becomes an output. */
if (num_chaining_values > 2 * parents_array_len) {
memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
&child_chaining_values[2 * parents_array_len *
BLAKE3_OUT_LEN], BLAKE3_OUT_LEN);
return (parents_array_len + 1);
} else {
return (parents_array_len);
}
}
/*
* The wide helper function returns (writes out) an array of chaining values
* and returns the length of that array. The number of chaining values returned
* is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE, or fewer
* if the input is shorter than that many chunks. The reason for maintaining a
* wide array of chaining values going back up the tree is to allow the
* implementation to hash as many parents in parallel as possible.
*
* As a special case when the SIMD degree is 1, this function will still return
* at least 2 outputs. This guarantees that this function doesn't perform the
* root compression. (If it did, it would use the wrong flags, and also we
* wouldn't be able to implement extendable output.) Note that this function is
* not used when the whole input is only 1 chunk long; that's a different
* codepath.
*
* Why not just have the caller split the input on the first update(), instead
* of implementing this special rule? Because we don't want to limit SIMD or
* multi-threading parallelism for that update().
*/
-static size_t blake3_compress_subtree_wide(const blake3_impl_ops_t *ops,
+static size_t blake3_compress_subtree_wide(const blake3_ops_t *ops,
const uint8_t *input, size_t input_len, const uint32_t key[8],
uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
/*
* Note that the single chunk case does *not* bump the SIMD degree up
* to 2 when it is 1. If this implementation adds multi-threading in
* the future, this gives us the option of multi-threading even the
* 2-chunk case, which can help performance on smaller platforms.
*/
if (input_len <= (size_t)(ops->degree * BLAKE3_CHUNK_LEN)) {
return (compress_chunks_parallel(ops, input, input_len, key,
chunk_counter, flags, out));
}
/*
* With more than simd_degree chunks, we need to recurse. Start by
* dividing the input into left and right subtrees. (Note that this is
* only optimal as long as the SIMD degree is a power of 2. If we ever
* get a SIMD degree of 3 or something, we'll need a more complicated
* strategy.)
*/
size_t left_input_len = left_len(input_len);
size_t right_input_len = input_len - left_input_len;
const uint8_t *right_input = &input[left_input_len];
uint64_t right_chunk_counter = chunk_counter +
(uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);
/*
* Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2
* to account for the special case of returning 2 outputs when the
* SIMD degree is 1.
*/
uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
size_t degree = ops->degree;
if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
/*
* The special case: We always use a degree of at least two,
* to make sure there are two outputs. Except, as noted above,
* at the chunk level, where we allow degree=1. (Note that the
* 1-chunk-input case is a different codepath.)
*/
degree = 2;
}
uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];
/*
* Recurse! If this implementation adds multi-threading support in the
* future, this is where it will go.
*/
size_t left_n = blake3_compress_subtree_wide(ops, input, left_input_len,
key, chunk_counter, flags, cv_array);
size_t right_n = blake3_compress_subtree_wide(ops, right_input,
right_input_len, key, right_chunk_counter, flags, right_cvs);
/*
* The special case again. If simd_degree=1, then we'll have left_n=1
* and right_n=1. Rather than compressing them into a single output,
* return them directly, to make sure we always have at least two
* outputs.
*/
if (left_n == 1) {
memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
return (2);
}
/* Otherwise, do one layer of parent node compression. */
size_t num_chaining_values = left_n + right_n;
return (compress_parents_parallel(ops, cv_array,
num_chaining_values, key, flags, out));
}
/*
* Hash a subtree with compress_subtree_wide(), and then condense the resulting
* list of chaining values down to a single parent node. Don't compress that
* last parent node, however. Instead, return its message bytes (the
* concatenated chaining values of its children). This is necessary when the
* first call to update() supplies a complete subtree, because the topmost
* parent node of that subtree could end up being the root. It's also necessary
* for extended output in the general case.
*
* As with compress_subtree_wide(), this function is not used on inputs of 1
* chunk or less. That's a different codepath.
*/
-static void compress_subtree_to_parent_node(const blake3_impl_ops_t *ops,
+static void compress_subtree_to_parent_node(const blake3_ops_t *ops,
const uint8_t *input, size_t input_len, const uint32_t key[8],
uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN])
{
uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
size_t num_cvs = blake3_compress_subtree_wide(ops, input, input_len,
key, chunk_counter, flags, cv_array);
/*
* If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
* compress_subtree_wide() returns more than 2 chaining values. Condense
* them into 2 by forming parent nodes repeatedly.
*/
uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
while (num_cvs > 2) {
num_cvs = compress_parents_parallel(ops, cv_array, num_cvs, key,
flags, out_array);
memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
}
memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
}
static void hasher_init_base(BLAKE3_CTX *ctx, const uint32_t key[8],
uint8_t flags)
{
memcpy(ctx->key, key, BLAKE3_KEY_LEN);
chunk_state_init(&ctx->chunk, key, flags);
ctx->cv_stack_len = 0;
ctx->ops = blake3_impl_get_ops();
}
/*
* As described in hasher_push_cv() below, we do "lazy merging", delaying
* merges until right before the next CV is about to be added. This is
* different from the reference implementation. Another difference is that we
* aren't always merging 1 chunk at a time. Instead, each CV might represent
* any power-of-two number of chunks, as long as the smaller-above-larger
* stack order is maintained. Instead of the "count the trailing 0-bits"
* algorithm described in the spec, we use a "count the total number of
* 1-bits" variant that doesn't require us to retain the subtree size of the
* CV on top of the stack. The principle is the same: each CV that should
* remain in the stack is represented by a 1-bit in the total number of chunks
* (or bytes) so far.
*/
static void hasher_merge_cv_stack(BLAKE3_CTX *ctx, uint64_t total_len)
{
size_t post_merge_stack_len = (size_t)popcnt(total_len);
while (ctx->cv_stack_len > post_merge_stack_len) {
uint8_t *parent_node =
&ctx->cv_stack[(ctx->cv_stack_len - 2) * BLAKE3_OUT_LEN];
output_t output =
parent_output(parent_node, ctx->key, ctx->chunk.flags);
output_chaining_value(ctx->ops, &output, parent_node);
ctx->cv_stack_len -= 1;
}
}
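/*
* Illustrative example (editorial note): after 6 chunks, total_len has the
* binary form 110, so popcnt() is 2 and the loop above merges the stack down
* to two CVs, one covering a 4-chunk subtree and one covering a 2-chunk
* subtree, matching the two set bits.
*/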
/*
* In reference_impl.rs, we merge the new CV with existing CVs from the stack
* before pushing it. We can do that because we know more input is coming, so
* we know none of the merges are root.
*
* This setting is different. We want to feed as much input as possible to
* compress_subtree_wide(), without setting aside anything for the chunk_state.
* If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
* as a single subtree, if at all possible.
*
* This leads to two problems:
* 1) This 64 KiB input might be the only call that ever gets made to update.
* In this case, the root node of the 64 KiB subtree would be the root node
* of the whole tree, and it would need to be ROOT finalized. We can't
* compress it until we know.
* 2) This 64 KiB input might complete a larger tree, whose root node is
* similarly going to be the root of the whole tree. For example, maybe
* we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
* node at the root of the 256 KiB subtree until we know how to finalize it.
*
* The second problem is solved with "lazy merging". That is, when we're about
* to add a CV to the stack, we don't merge it with anything first, as the
* reference impl does. Instead we do merges using the *previous* CV that was
* added, which is sitting on top of the stack, and we put the new CV
* (unmerged) on top of the stack afterwards. This guarantees that we never
* merge the root node until finalize().
*
* Solving the first problem requires an additional tool,
* compress_subtree_to_parent_node(). That function always returns the top
* *two* chaining values of the subtree it's compressing. We then do lazy
* merging with each of them separately, so that the second CV will always
* remain unmerged. (That also helps us support extendable output when we're
* hashing an input all-at-once.)
*/
static void hasher_push_cv(BLAKE3_CTX *ctx, uint8_t new_cv[BLAKE3_OUT_LEN],
uint64_t chunk_counter)
{
hasher_merge_cv_stack(ctx, chunk_counter);
memcpy(&ctx->cv_stack[ctx->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
BLAKE3_OUT_LEN);
ctx->cv_stack_len += 1;
}
void
Blake3_Init(BLAKE3_CTX *ctx)
{
hasher_init_base(ctx, BLAKE3_IV, 0);
}
void
Blake3_InitKeyed(BLAKE3_CTX *ctx, const uint8_t key[BLAKE3_KEY_LEN])
{
uint32_t key_words[8];
load_key_words(key, key_words);
hasher_init_base(ctx, key_words, KEYED_HASH);
}
static void
Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
{
/*
* Explicitly checking for zero avoids causing UB by passing a null
* pointer to memcpy. This comes up in practice with things like:
* std::vector<uint8_t> v;
* blake3_hasher_update(&hasher, v.data(), v.size());
*/
if (input_len == 0) {
return;
}
const uint8_t *input_bytes = (const uint8_t *)input;
/*
* If we have some partial chunk bytes in the internal chunk_state, we
* need to finish that chunk first.
*/
if (chunk_state_len(&ctx->chunk) > 0) {
size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&ctx->chunk);
if (take > input_len) {
take = input_len;
}
chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, take);
input_bytes += take;
input_len -= take;
/*
* If we've filled the current chunk and there's more coming,
* finalize this chunk and proceed. In this case we know it's
* not the root.
*/
if (input_len > 0) {
output_t output = chunk_state_output(&ctx->chunk);
uint8_t chunk_cv[32];
output_chaining_value(ctx->ops, &output, chunk_cv);
hasher_push_cv(ctx, chunk_cv, ctx->chunk.chunk_counter);
chunk_state_reset(&ctx->chunk, ctx->key,
ctx->chunk.chunk_counter + 1);
} else {
return;
}
}
/*
* Now the chunk_state is clear, and we have more input. If there's
* more than a single chunk (so, definitely not the root chunk), hash
* the largest whole subtree we can, with the full benefits of SIMD
* (and maybe in the future, multi-threading) parallelism. Two
* restrictions:
* - The subtree has to be a power-of-2 number of chunks. Only
* subtrees along the right edge can be incomplete, and we don't know
* where the right edge is going to be until we get to finalize().
* - The subtree must evenly divide the total number of chunks up
* until this point (if total is not 0). If the current incomplete
* subtree is only waiting for 1 more chunk, we can't hash a subtree
* of 4 chunks. We have to complete the current subtree first.
* Because we might need to break up the input to form powers of 2, or
* to evenly divide what we already have, this part runs in a loop.
*/
while (input_len > BLAKE3_CHUNK_LEN) {
size_t subtree_len = round_down_to_power_of_2(input_len);
uint64_t count_so_far =
ctx->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
/*
* Shrink the subtree_len until it evenly divides the count so
* far. We know that subtree_len itself is a power of 2, so we
* can use a bitmasking trick instead of an actual remainder
* operation. (Note that if the caller consistently passes
* power-of-2 inputs of the same size, as is hopefully
* typical, this loop condition will always fail, and
* subtree_len will always be the full length of the input.)
*
* An aside: We don't have to shrink subtree_len quite this
* much. For example, if count_so_far is 1, we could pass 2
* chunks to compress_subtree_to_parent_node. Since we'll get
* 2 CVs back, we'll still get the right answer in the end,
* and we might get to use 2-way SIMD parallelism. The problem
* with this optimization is that it gets us stuck always
* hashing 2 chunks. The total number of chunks will remain
* odd, and we'll never graduate to higher degrees of
* parallelism. See
* https://github.com/BLAKE3-team/BLAKE3/issues/69.
*/
while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
subtree_len /= 2;
}
/*
* The shrunken subtree_len might now be 1 chunk long. If so,
* hash that one chunk by itself. Otherwise, compress the
* subtree into a pair of CVs.
*/
uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
if (subtree_len <= BLAKE3_CHUNK_LEN) {
blake3_chunk_state_t chunk_state;
chunk_state_init(&chunk_state, ctx->key,
ctx->chunk.flags);
chunk_state.chunk_counter = ctx->chunk.chunk_counter;
chunk_state_update(ctx->ops, &chunk_state, input_bytes,
subtree_len);
output_t output = chunk_state_output(&chunk_state);
uint8_t cv[BLAKE3_OUT_LEN];
output_chaining_value(ctx->ops, &output, cv);
hasher_push_cv(ctx, cv, chunk_state.chunk_counter);
} else {
/*
* This is the high-performance happy path, though
* getting here depends on the caller giving us a long
* enough input.
*/
uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
compress_subtree_to_parent_node(ctx->ops, input_bytes,
subtree_len, ctx->key, ctx->chunk.chunk_counter,
ctx->chunk.flags, cv_pair);
hasher_push_cv(ctx, cv_pair, ctx->chunk.chunk_counter);
hasher_push_cv(ctx, &cv_pair[BLAKE3_OUT_LEN],
ctx->chunk.chunk_counter + (subtree_chunks / 2));
}
ctx->chunk.chunk_counter += subtree_chunks;
input_bytes += subtree_len;
input_len -= subtree_len;
}
/*
* If there's any remaining input less than a full chunk, add it to
* the chunk state. In that case, also do a final merge loop to make
* sure the subtree stack doesn't contain any unmerged pairs. The
* remaining input means we know these merges are non-root. This merge
* loop isn't strictly necessary here, because hasher_push_cv
* already does its own merge loop, but it simplifies
* Blake3_FinalSeek() below.
*/
if (input_len > 0) {
chunk_state_update(ctx->ops, &ctx->chunk, input_bytes,
input_len);
hasher_merge_cv_stack(ctx, ctx->chunk.chunk_counter);
}
}
void
Blake3_Update(BLAKE3_CTX *ctx, const void *input, size_t todo)
{
size_t done = 0;
const uint8_t *data = input;
const size_t block_max = 1024 * 64;
/* max feed buffer to leave the stack size small */
while (todo != 0) {
size_t block = (todo >= block_max) ? block_max : todo;
Blake3_Update2(ctx, data + done, block);
done += block;
todo -= block;
}
}
void
Blake3_Final(const BLAKE3_CTX *ctx, uint8_t *out)
{
Blake3_FinalSeek(ctx, 0, out, BLAKE3_OUT_LEN);
}
void
Blake3_FinalSeek(const BLAKE3_CTX *ctx, uint64_t seek, uint8_t *out,
size_t out_len)
{
/*
* Explicitly checking for zero avoids causing UB by passing a null
* pointer to memcpy. This comes up in practice with things like:
* std::vector<uint8_t> v;
* blake3_hasher_finalize(&hasher, v.data(), v.size());
*/
if (out_len == 0) {
return;
}
/* If the subtree stack is empty, then the current chunk is the root. */
if (ctx->cv_stack_len == 0) {
output_t output = chunk_state_output(&ctx->chunk);
output_root_bytes(ctx->ops, &output, seek, out, out_len);
return;
}
/*
* If there are any bytes in the chunk state, finalize that chunk and
* do a roll-up merge between that chunk hash and every subtree in the
* stack. In this case, the extra merge loop at the end of
* blake3_hasher_update guarantees that none of the subtrees in the
* stack need to be merged with each other first. Otherwise, if there
* are no bytes in the chunk state, then the top of the stack is a
* chunk hash, and we start the merge from that.
*/
output_t output;
size_t cvs_remaining;
if (chunk_state_len(&ctx->chunk) > 0) {
cvs_remaining = ctx->cv_stack_len;
output = chunk_state_output(&ctx->chunk);
} else {
/* There are always at least 2 CVs in the stack in this case. */
cvs_remaining = ctx->cv_stack_len - 2;
output = parent_output(&ctx->cv_stack[cvs_remaining * 32],
ctx->key, ctx->chunk.flags);
}
while (cvs_remaining > 0) {
cvs_remaining -= 1;
uint8_t parent_block[BLAKE3_BLOCK_LEN];
memcpy(parent_block, &ctx->cv_stack[cvs_remaining * 32], 32);
output_chaining_value(ctx->ops, &output, &parent_block[32]);
output = parent_output(parent_block, ctx->key,
ctx->chunk.flags);
}
output_root_bytes(ctx->ops, &output, seek, out, out_len);
}
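/*
* Editorial usage sketch (not part of the imported sources); buf and buf_len
* stand for any caller-supplied data:
*
* BLAKE3_CTX ctx;
* uint8_t digest[BLAKE3_OUT_LEN];
*
* Blake3_Init(&ctx);
* Blake3_Update(&ctx, buf, buf_len);
* Blake3_Final(&ctx, digest);
*
* Keyed hashing only changes the first step (Blake3_InitKeyed() with a
* BLAKE3_KEY_LEN-byte key); extendable output goes through Blake3_FinalSeek().
*/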
diff --git a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_generic.c b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_generic.c
index 6c1eb33e89c0..94a1f108236e 100644
--- a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_generic.c
+++ b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_generic.c
@@ -1,202 +1,202 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
* Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
* Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#include <sys/zfs_context.h>
#include "blake3_impl.h"
#define rotr32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
static inline void g(uint32_t *state, size_t a, size_t b, size_t c, size_t d,
uint32_t x, uint32_t y)
{
state[a] = state[a] + state[b] + x;
state[d] = rotr32(state[d] ^ state[a], 16);
state[c] = state[c] + state[d];
state[b] = rotr32(state[b] ^ state[c], 12);
state[a] = state[a] + state[b] + y;
state[d] = rotr32(state[d] ^ state[a], 8);
state[c] = state[c] + state[d];
state[b] = rotr32(state[b] ^ state[c], 7);
}
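/*
* Editorial note: g() above is the ChaCha-derived quarter-round also used by
* BLAKE2s; each call mixes two message words (x, y) into four state words
* with the fixed rotations 16, 12, 8 and 7.
*/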
static inline void round_fn(uint32_t state[16], const uint32_t *msg,
size_t round)
{
/* Select the message schedule based on the round. */
const uint8_t *schedule = BLAKE3_MSG_SCHEDULE[round];
/* Mix the columns. */
g(state, 0, 4, 8, 12, msg[schedule[0]], msg[schedule[1]]);
g(state, 1, 5, 9, 13, msg[schedule[2]], msg[schedule[3]]);
g(state, 2, 6, 10, 14, msg[schedule[4]], msg[schedule[5]]);
g(state, 3, 7, 11, 15, msg[schedule[6]], msg[schedule[7]]);
/* Mix the rows. */
g(state, 0, 5, 10, 15, msg[schedule[8]], msg[schedule[9]]);
g(state, 1, 6, 11, 12, msg[schedule[10]], msg[schedule[11]]);
g(state, 2, 7, 8, 13, msg[schedule[12]], msg[schedule[13]]);
g(state, 3, 4, 9, 14, msg[schedule[14]], msg[schedule[15]]);
}
static inline void compress_pre(uint32_t state[16], const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags)
{
uint32_t block_words[16];
block_words[0] = load32(block + 4 * 0);
block_words[1] = load32(block + 4 * 1);
block_words[2] = load32(block + 4 * 2);
block_words[3] = load32(block + 4 * 3);
block_words[4] = load32(block + 4 * 4);
block_words[5] = load32(block + 4 * 5);
block_words[6] = load32(block + 4 * 6);
block_words[7] = load32(block + 4 * 7);
block_words[8] = load32(block + 4 * 8);
block_words[9] = load32(block + 4 * 9);
block_words[10] = load32(block + 4 * 10);
block_words[11] = load32(block + 4 * 11);
block_words[12] = load32(block + 4 * 12);
block_words[13] = load32(block + 4 * 13);
block_words[14] = load32(block + 4 * 14);
block_words[15] = load32(block + 4 * 15);
state[0] = cv[0];
state[1] = cv[1];
state[2] = cv[2];
state[3] = cv[3];
state[4] = cv[4];
state[5] = cv[5];
state[6] = cv[6];
state[7] = cv[7];
state[8] = BLAKE3_IV[0];
state[9] = BLAKE3_IV[1];
state[10] = BLAKE3_IV[2];
state[11] = BLAKE3_IV[3];
state[12] = counter_low(counter);
state[13] = counter_high(counter);
state[14] = (uint32_t)block_len;
state[15] = (uint32_t)flags;
round_fn(state, &block_words[0], 0);
round_fn(state, &block_words[0], 1);
round_fn(state, &block_words[0], 2);
round_fn(state, &block_words[0], 3);
round_fn(state, &block_words[0], 4);
round_fn(state, &block_words[0], 5);
round_fn(state, &block_words[0], 6);
}
static inline void blake3_compress_in_place_generic(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags)
{
uint32_t state[16];
compress_pre(state, cv, block, block_len, counter, flags);
cv[0] = state[0] ^ state[8];
cv[1] = state[1] ^ state[9];
cv[2] = state[2] ^ state[10];
cv[3] = state[3] ^ state[11];
cv[4] = state[4] ^ state[12];
cv[5] = state[5] ^ state[13];
cv[6] = state[6] ^ state[14];
cv[7] = state[7] ^ state[15];
}
static inline void hash_one_generic(const uint8_t *input, size_t blocks,
const uint32_t key[8], uint64_t counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN])
{
uint32_t cv[8];
memcpy(cv, key, BLAKE3_KEY_LEN);
uint8_t block_flags = flags | flags_start;
while (blocks > 0) {
if (blocks == 1) {
block_flags |= flags_end;
}
blake3_compress_in_place_generic(cv, input, BLAKE3_BLOCK_LEN,
counter, block_flags);
input = &input[BLAKE3_BLOCK_LEN];
blocks -= 1;
block_flags = flags;
}
store_cv_words(out, cv);
}
static inline void blake3_compress_xof_generic(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64])
{
uint32_t state[16];
compress_pre(state, cv, block, block_len, counter, flags);
store32(&out[0 * 4], state[0] ^ state[8]);
store32(&out[1 * 4], state[1] ^ state[9]);
store32(&out[2 * 4], state[2] ^ state[10]);
store32(&out[3 * 4], state[3] ^ state[11]);
store32(&out[4 * 4], state[4] ^ state[12]);
store32(&out[5 * 4], state[5] ^ state[13]);
store32(&out[6 * 4], state[6] ^ state[14]);
store32(&out[7 * 4], state[7] ^ state[15]);
store32(&out[8 * 4], state[8] ^ cv[0]);
store32(&out[9 * 4], state[9] ^ cv[1]);
store32(&out[10 * 4], state[10] ^ cv[2]);
store32(&out[11 * 4], state[11] ^ cv[3]);
store32(&out[12 * 4], state[12] ^ cv[4]);
store32(&out[13 * 4], state[13] ^ cv[5]);
store32(&out[14 * 4], state[14] ^ cv[6]);
store32(&out[15 * 4], state[15] ^ cv[7]);
}
static inline void blake3_hash_many_generic(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8], uint64_t counter,
boolean_t increment_counter, uint8_t flags, uint8_t flags_start,
uint8_t flags_end, uint8_t *out)
{
while (num_inputs > 0) {
hash_one_generic(inputs[0], blocks, key, counter, flags,
flags_start, flags_end, out);
if (increment_counter) {
counter += 1;
}
inputs += 1;
num_inputs -= 1;
out = &out[BLAKE3_OUT_LEN];
}
}
static inline boolean_t blake3_is_generic_supported(void)
{
return (B_TRUE);
}
-const blake3_impl_ops_t blake3_generic_impl = {
+const blake3_ops_t blake3_generic_impl = {
.compress_in_place = blake3_compress_in_place_generic,
.compress_xof = blake3_compress_xof_generic,
.hash_many = blake3_hash_many_generic,
.is_supported = blake3_is_generic_supported,
.degree = 4,
.name = "generic"
};
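/*
* Editorial note: blake3_hash_many_generic() processes its inputs serially,
* but degree is still 4, so the subtree-splitting logic in blake3.c makes the
* same decisions with this fallback as with the 128-bit SIMD variants.
*/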
diff --git a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.c b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.c
index 10741c82de7e..5276fd88fbb6 100644
--- a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.c
+++ b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.c
@@ -1,284 +1,362 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#include <sys/zfs_context.h>
#include <sys/zio_checksum.h>
#include "blake3_impl.h"
-static const blake3_impl_ops_t *const blake3_impls[] = {
+static const blake3_ops_t *const blake3_impls[] = {
&blake3_generic_impl,
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
&blake3_sse2_impl,
#endif
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE4_1)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
&blake3_sse41_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSE4_1) && defined(HAVE_AVX2)
&blake3_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) && defined(HAVE_AVX512VL)
&blake3_avx512_impl,
#endif
};
-/* this pointer holds current ops for implementation */
-static const blake3_impl_ops_t *blake3_selected_impl = &blake3_generic_impl;
-
-/* special implementation selections */
+/* Select BLAKE3 implementation */
#define IMPL_FASTEST (UINT32_MAX)
-#define IMPL_CYCLE (UINT32_MAX-1)
-#define IMPL_USER (UINT32_MAX-2)
-#define IMPL_PARAM (UINT32_MAX-3)
+#define IMPL_CYCLE (UINT32_MAX - 1)
-#define IMPL_READ(i) (*(volatile uint32_t *) &(i))
-static uint32_t icp_blake3_impl = IMPL_FASTEST;
+#define IMPL_READ(i) (*(volatile uint32_t *) &(i))
-#define BLAKE3_IMPL_NAME_MAX 16
+/* Indicate that benchmark has been done */
+static boolean_t blake3_initialized = B_FALSE;
-/* id of fastest implementation */
-static uint32_t blake3_fastest_id = 0;
+/* Implementation that contains the fastest methods */
+static blake3_ops_t blake3_fastest_impl = {
+ .name = "fastest"
+};
-/* currently used id */
-static uint32_t blake3_current_id = 0;
+/* Hold all supported implementations */
+static const blake3_ops_t *blake3_supp_impls[ARRAY_SIZE(blake3_impls)];
+static uint32_t blake3_supp_impls_cnt = 0;
-/* id of module parameter (-1 == unused) */
-static int blake3_param_id = -1;
+/* Currently selected implementation */
+static uint32_t blake3_impl_chosen = IMPL_FASTEST;
-/* return number of supported implementations */
-int
-blake3_get_impl_count(void)
+static struct blake3_impl_selector {
+ const char *name;
+ uint32_t sel;
+} blake3_impl_selectors[] = {
+ { "cycle", IMPL_CYCLE },
+ { "fastest", IMPL_FASTEST }
+};
+
+/* check the supported implementations */
+static void blake3_impl_init(void)
{
- static int impls = 0;
- int i;
+ int i, c;
+
+ /* init only once */
+ if (likely(blake3_initialized))
+ return;
- if (impls)
- return (impls);
+ /* move supported implementations into blake3_supp_impls */
+ for (i = 0, c = 0; i < ARRAY_SIZE(blake3_impls); i++) {
+ const blake3_ops_t *impl = blake3_impls[i];
- for (i = 0; i < ARRAY_SIZE(blake3_impls); i++) {
- if (!blake3_impls[i]->is_supported()) continue;
- impls++;
+ if (impl->is_supported && impl->is_supported())
+ blake3_supp_impls[c++] = impl;
}
+ blake3_supp_impls_cnt = c;
- return (impls);
+ /* first init generic impl, may be changed via set_fastest() */
+ memcpy(&blake3_fastest_impl, blake3_impls[0],
+ sizeof (blake3_fastest_impl));
+ blake3_initialized = B_TRUE;
}
-/* return id of selected implementation */
-int
-blake3_get_impl_id(void)
+/* get number of supported implementations */
+uint32_t
+blake3_impl_getcnt(void)
+{
+ blake3_impl_init();
+ return (blake3_supp_impls_cnt);
+}
+
+/* get id of selected implementation */
+uint32_t
+blake3_impl_getid(void)
{
- return (blake3_current_id);
+ return (IMPL_READ(blake3_impl_chosen));
}
-/* return name of selected implementation */
+/* get name of selected implementation */
const char *
-blake3_get_impl_name(void)
+blake3_impl_getname(void)
{
- return (blake3_selected_impl->name);
+ uint32_t impl = IMPL_READ(blake3_impl_chosen);
+
+ blake3_impl_init();
+ switch (impl) {
+ case IMPL_FASTEST:
+ return ("fastest");
+ case IMPL_CYCLE:
+ return ("cycle");
+ default:
+ return (blake3_supp_impls[impl]->name);
+ }
}
/* setup id as fastest implementation */
void
-blake3_set_impl_fastest(uint32_t id)
+blake3_impl_set_fastest(uint32_t id)
{
- blake3_fastest_id = id;
+ /* setup fastest impl */
+ memcpy(&blake3_fastest_impl, blake3_supp_impls[id],
+ sizeof (blake3_fastest_impl));
}
/* set implementation by id */
void
-blake3_set_impl_id(uint32_t id)
+blake3_impl_setid(uint32_t id)
{
- int i, cid;
-
- /* select fastest */
- if (id == IMPL_FASTEST)
- id = blake3_fastest_id;
-
- /* select next or first */
- if (id == IMPL_CYCLE)
- id = (++blake3_current_id) % blake3_get_impl_count();
-
- /* 0..N for the real impl */
- for (i = 0, cid = 0; i < ARRAY_SIZE(blake3_impls); i++) {
- if (!blake3_impls[i]->is_supported()) continue;
- if (cid == id) {
- blake3_current_id = cid;
- blake3_selected_impl = blake3_impls[i];
- return;
- }
- cid++;
+ blake3_impl_init();
+ switch (id) {
+ case IMPL_FASTEST:
+ atomic_swap_32(&blake3_impl_chosen, IMPL_FASTEST);
+ break;
+ case IMPL_CYCLE:
+ atomic_swap_32(&blake3_impl_chosen, IMPL_CYCLE);
+ break;
+ default:
+ ASSERT3U(id, >=, 0);
+ ASSERT3U(id, <, blake3_supp_impls_cnt);
+ atomic_swap_32(&blake3_impl_chosen, id);
+ break;
}
}
/* set implementation by name */
int
-blake3_set_impl_name(const char *name)
+blake3_impl_setname(const char *val)
{
- int i, cid;
-
- if (strcmp(name, "fastest") == 0) {
- atomic_swap_32(&icp_blake3_impl, IMPL_FASTEST);
- blake3_set_impl_id(IMPL_FASTEST);
- return (0);
- } else if (strcmp(name, "cycle") == 0) {
- atomic_swap_32(&icp_blake3_impl, IMPL_CYCLE);
- blake3_set_impl_id(IMPL_CYCLE);
- return (0);
+ uint32_t impl = IMPL_READ(blake3_impl_chosen);
+ size_t val_len;
+ int i, err = -EINVAL;
+
+ blake3_impl_init();
+ val_len = strlen(val);
+ while ((val_len > 0) && !!isspace(val[val_len-1])) /* trim '\n' */
+ val_len--;
+
+ /* check mandatory implementations */
+ for (i = 0; i < ARRAY_SIZE(blake3_impl_selectors); i++) {
+ const char *name = blake3_impl_selectors[i].name;
+
+ if (val_len == strlen(name) &&
+ strncmp(val, name, val_len) == 0) {
+ impl = blake3_impl_selectors[i].sel;
+ err = 0;
+ break;
+ }
}
- for (i = 0, cid = 0; i < ARRAY_SIZE(blake3_impls); i++) {
- if (!blake3_impls[i]->is_supported()) continue;
- if (strcmp(name, blake3_impls[i]->name) == 0) {
- if (icp_blake3_impl == IMPL_PARAM) {
- blake3_param_id = cid;
- return (0);
+ if (err != 0 && blake3_initialized) {
+ /* check all supported implementations */
+ for (i = 0; i < blake3_supp_impls_cnt; i++) {
+ const char *name = blake3_supp_impls[i]->name;
+
+ if (val_len == strlen(name) &&
+ strncmp(val, name, val_len) == 0) {
+ impl = i;
+ err = 0;
+ break;
}
- blake3_selected_impl = blake3_impls[i];
- blake3_current_id = cid;
- return (0);
}
- cid++;
}
- return (-EINVAL);
+ if (err == 0) {
+ atomic_swap_32(&blake3_impl_chosen, impl);
+ }
+
+ return (err);
}
-/* setup implementation */
-void
-blake3_setup_impl(void)
+const blake3_ops_t *
+blake3_impl_get_ops(void)
{
- switch (IMPL_READ(icp_blake3_impl)) {
- case IMPL_PARAM:
- blake3_set_impl_id(blake3_param_id);
- atomic_swap_32(&icp_blake3_impl, IMPL_USER);
- break;
+ const blake3_ops_t *ops = NULL;
+ uint32_t impl = IMPL_READ(blake3_impl_chosen);
+
+ blake3_impl_init();
+ switch (impl) {
case IMPL_FASTEST:
- blake3_set_impl_id(IMPL_FASTEST);
+ ASSERT(blake3_initialized);
+ ops = &blake3_fastest_impl;
break;
case IMPL_CYCLE:
- blake3_set_impl_id(IMPL_CYCLE);
+ /* Cycle through supported implementations */
+ ASSERT(blake3_initialized);
+ ASSERT3U(blake3_supp_impls_cnt, >, 0);
+ static uint32_t cycle_count = 0;
+ uint32_t idx = (++cycle_count) % blake3_supp_impls_cnt;
+ ops = blake3_supp_impls[idx];
break;
default:
- blake3_set_impl_id(blake3_current_id);
+ ASSERT3U(blake3_supp_impls_cnt, >, 0);
+ ASSERT3U(impl, <, blake3_supp_impls_cnt);
+ ops = blake3_supp_impls[impl];
break;
}
-}
-/* return selected implementation */
-const blake3_impl_ops_t *
-blake3_impl_get_ops(void)
-{
- /* each call to ops will cycle */
- if (icp_blake3_impl == IMPL_CYCLE)
- blake3_set_impl_id(IMPL_CYCLE);
-
- return (blake3_selected_impl);
+ ASSERT3P(ops, !=, NULL);
+ return (ops);
}
#if defined(_KERNEL)
+
void **blake3_per_cpu_ctx;
void
blake3_per_cpu_ctx_init(void)
{
/*
* Create "The Godfather" ptr to hold all blake3 ctx
*/
blake3_per_cpu_ctx = kmem_alloc(max_ncpus * sizeof (void *), KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
blake3_per_cpu_ctx[i] = kmem_alloc(sizeof (BLAKE3_CTX),
KM_SLEEP);
}
+
+ /* init once in kernel mode */
+ blake3_impl_init();
}
void
blake3_per_cpu_ctx_fini(void)
{
for (int i = 0; i < max_ncpus; i++) {
memset(blake3_per_cpu_ctx[i], 0, sizeof (BLAKE3_CTX));
kmem_free(blake3_per_cpu_ctx[i], sizeof (BLAKE3_CTX));
}
memset(blake3_per_cpu_ctx, 0, max_ncpus * sizeof (void *));
kmem_free(blake3_per_cpu_ctx, max_ncpus * sizeof (void *));
}
-#endif
-#if defined(_KERNEL) && defined(__linux__)
+#define IMPL_FMT(impl, i) (((impl) == (i)) ? "[%s] " : "%s ")
+
+#if defined(__linux__)
+
static int
-icp_blake3_impl_set(const char *name, zfs_kernel_param_t *kp)
+blake3_param_get(char *buffer, zfs_kernel_param_t *unused)
{
- char req_name[BLAKE3_IMPL_NAME_MAX];
- size_t i;
+ const uint32_t impl = IMPL_READ(blake3_impl_chosen);
+ char *fmt;
+ int cnt = 0;
- /* sanitize input */
- i = strnlen(name, BLAKE3_IMPL_NAME_MAX);
- if (i == 0 || i >= BLAKE3_IMPL_NAME_MAX)
- return (-EINVAL);
+ /* cycling */
+ fmt = IMPL_FMT(impl, IMPL_CYCLE);
+ cnt += sprintf(buffer + cnt, fmt, "cycle");
+
+ /* list fastest */
+ fmt = IMPL_FMT(impl, IMPL_FASTEST);
+ cnt += sprintf(buffer + cnt, fmt, "fastest");
+
+ /* list all supported implementations */
+ for (uint32_t i = 0; i < blake3_supp_impls_cnt; ++i) {
+ fmt = IMPL_FMT(impl, i);
+ cnt += sprintf(buffer + cnt, fmt,
+ blake3_supp_impls[i]->name);
+ }
- strlcpy(req_name, name, BLAKE3_IMPL_NAME_MAX);
- while (i > 0 && isspace(req_name[i-1]))
- i--;
- req_name[i] = '\0';
+ return (cnt);
+}
- atomic_swap_32(&icp_blake3_impl, IMPL_PARAM);
- return (blake3_set_impl_name(req_name));
+static int
+blake3_param_set(const char *val, zfs_kernel_param_t *unused)
+{
+ (void) unused;
+ return (blake3_impl_setname(val));
}
+#elif defined(__FreeBSD__)
+
+#include <sys/sbuf.h>
+
static int
-icp_blake3_impl_get(char *buffer, zfs_kernel_param_t *kp)
+blake3_param(ZFS_MODULE_PARAM_ARGS)
{
- int i, cid, cnt = 0;
- char *fmt;
+ int err;
- /* cycling */
- fmt = (icp_blake3_impl == IMPL_CYCLE) ? "[cycle] " : "cycle ";
- cnt += sprintf(buffer + cnt, fmt);
-
- /* fastest one */
- fmt = (icp_blake3_impl == IMPL_FASTEST) ? "[fastest] " : "fastest ";
- cnt += sprintf(buffer + cnt, fmt);
-
- /* user selected */
- for (i = 0, cid = 0; i < ARRAY_SIZE(blake3_impls); i++) {
- if (!blake3_impls[i]->is_supported()) continue;
- fmt = (icp_blake3_impl == IMPL_USER &&
- cid == blake3_current_id) ? "[%s] " : "%s ";
- cnt += sprintf(buffer + cnt, fmt, blake3_impls[i]->name);
- cid++;
+ if (req->newptr == NULL) {
+ const uint32_t impl = IMPL_READ(blake3_impl_chosen);
+ const int init_buflen = 64;
+ const char *fmt;
+ struct sbuf *s;
+
+ s = sbuf_new_for_sysctl(NULL, NULL, init_buflen, req);
+
+ /* cycling */
+ fmt = IMPL_FMT(impl, IMPL_CYCLE);
+ (void) sbuf_printf(s, fmt, "cycle");
+
+ /* list fastest */
+ fmt = IMPL_FMT(impl, IMPL_FASTEST);
+ (void) sbuf_printf(s, fmt, "fastest");
+
+ /* list all supported implementations */
+ for (uint32_t i = 0; i < blake3_supp_impls_cnt; ++i) {
+ fmt = IMPL_FMT(impl, i);
+ (void) sbuf_printf(s, fmt, blake3_supp_impls[i]->name);
+ }
+
+ err = sbuf_finish(s);
+ sbuf_delete(s);
+
+ return (err);
}
- buffer[cnt] = 0;
+ char buf[16];
- return (cnt);
+ err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
+ if (err) {
+ return (err);
+ }
+
+ return (-blake3_impl_setname(buf));
}
+#endif
+
+#undef IMPL_FMT
-module_param_call(icp_blake3_impl, icp_blake3_impl_set, icp_blake3_impl_get,
- NULL, 0644);
-MODULE_PARM_DESC(icp_blake3_impl, "Select BLAKE3 implementation.");
+ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs, zfs_, blake3_impl,
+ blake3_param_set, blake3_param_get, ZMOD_RW, \
+ "Select BLAKE3 implementation.");
#endif
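/*
* Editorial note (assumed knob names, to be verified against the build): the
* ZFS_MODULE_VIRTUAL_PARAM_CALL registration above is expected to surface as
* the zfs_blake3_impl module parameter on Linux and the vfs.zfs.blake3_impl
* sysctl on FreeBSD, accepting "cycle", "fastest", or a specific
* implementation name such as "generic".
*/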
diff --git a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.h b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.h
index 5254061c7378..eef74eaa9098 100644
--- a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.h
+++ b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_impl.h
@@ -1,213 +1,213 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
* Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
* Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#ifndef BLAKE3_IMPL_H
#define BLAKE3_IMPL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/types.h>
#include <sys/blake3.h>
#include <sys/simd.h>
/*
* Methods used to define BLAKE3 assembler implementations
*/
typedef void (*blake3_compress_in_place_f)(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN],
uint8_t block_len, uint64_t counter,
uint8_t flags);
typedef void (*blake3_compress_xof_f)(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]);
typedef void (*blake3_hash_many_f)(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out);
typedef boolean_t (*blake3_is_supported_f)(void);
typedef struct blake3_impl_ops {
blake3_compress_in_place_f compress_in_place;
blake3_compress_xof_f compress_xof;
blake3_hash_many_f hash_many;
blake3_is_supported_f is_supported;
int degree;
const char *name;
-} blake3_impl_ops_t;
+} blake3_ops_t;
/* Return selected BLAKE3 implementation ops */
-extern const blake3_impl_ops_t *blake3_impl_get_ops(void);
+extern const blake3_ops_t *blake3_impl_get_ops(void);
-extern const blake3_impl_ops_t blake3_generic_impl;
+extern const blake3_ops_t blake3_generic_impl;
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
-extern const blake3_impl_ops_t blake3_sse2_impl;
+extern const blake3_ops_t blake3_sse2_impl;
#endif
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE4_1)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
-extern const blake3_impl_ops_t blake3_sse41_impl;
+extern const blake3_ops_t blake3_sse41_impl;
#endif
#if defined(__x86_64) && defined(HAVE_SSE4_1) && defined(HAVE_AVX2)
-extern const blake3_impl_ops_t blake3_avx2_impl;
+extern const blake3_ops_t blake3_avx2_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) && defined(HAVE_AVX512VL)
-extern const blake3_impl_ops_t blake3_avx512_impl;
+extern const blake3_ops_t blake3_avx512_impl;
#endif
#if defined(__x86_64)
#define MAX_SIMD_DEGREE 16
#else
#define MAX_SIMD_DEGREE 4
#endif
#define MAX_SIMD_DEGREE_OR_2 (MAX_SIMD_DEGREE > 2 ? MAX_SIMD_DEGREE : 2)
static const uint32_t BLAKE3_IV[8] = {
0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL};
static const uint8_t BLAKE3_MSG_SCHEDULE[7][16] = {
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
{2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8},
{3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1},
{10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6},
{12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4},
{9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7},
{11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13},
};
/* Find index of the highest set bit */
static inline unsigned int highest_one(uint64_t x) {
#if defined(__GNUC__) || defined(__clang__)
return (63 ^ __builtin_clzll(x));
#elif defined(_MSC_VER) && defined(IS_X86_64)
unsigned long index;
_BitScanReverse64(&index, x);
return (index);
#elif defined(_MSC_VER) && defined(IS_X86_32)
if (x >> 32) {
unsigned long index;
_BitScanReverse(&index, x >> 32);
return (32 + index);
} else {
unsigned long index;
_BitScanReverse(&index, x);
return (index);
}
#else
unsigned int c = 0;
if (x & 0xffffffff00000000ULL) { x >>= 32; c += 32; }
if (x & 0x00000000ffff0000ULL) { x >>= 16; c += 16; }
if (x & 0x000000000000ff00ULL) { x >>= 8; c += 8; }
if (x & 0x00000000000000f0ULL) { x >>= 4; c += 4; }
if (x & 0x000000000000000cULL) { x >>= 2; c += 2; }
if (x & 0x0000000000000002ULL) { c += 1; }
return (c);
#endif
}
/* Count the number of 1 bits. */
static inline unsigned int popcnt(uint64_t x) {
unsigned int count = 0;
while (x != 0) {
count += 1;
x &= x - 1;
}
return (count);
}
/*
* Largest power of two less than or equal to x.
* As a special case, returns 1 when x is 0.
*/
static inline uint64_t round_down_to_power_of_2(uint64_t x) {
return (1ULL << highest_one(x | 1));
}
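/*
* Editorial examples: highest_one(0x10) is 4; round_down_to_power_of_2(5) is
* 4; round_down_to_power_of_2(0) is 1 because of the "| 1" above; popcnt(6)
* is 2. These helpers drive the left/right subtree split and the CV-stack
* merge rule in blake3.c.
*/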
static inline uint32_t counter_low(uint64_t counter) {
return ((uint32_t)counter);
}
static inline uint32_t counter_high(uint64_t counter) {
return ((uint32_t)(counter >> 32));
}
static inline uint32_t load32(const void *src) {
const uint8_t *p = (const uint8_t *)src;
return ((uint32_t)(p[0]) << 0) | ((uint32_t)(p[1]) << 8) |
((uint32_t)(p[2]) << 16) | ((uint32_t)(p[3]) << 24);
}
static inline void load_key_words(const uint8_t key[BLAKE3_KEY_LEN],
uint32_t key_words[8]) {
key_words[0] = load32(&key[0 * 4]);
key_words[1] = load32(&key[1 * 4]);
key_words[2] = load32(&key[2 * 4]);
key_words[3] = load32(&key[3 * 4]);
key_words[4] = load32(&key[4 * 4]);
key_words[5] = load32(&key[5 * 4]);
key_words[6] = load32(&key[6 * 4]);
key_words[7] = load32(&key[7 * 4]);
}
static inline void store32(void *dst, uint32_t w) {
uint8_t *p = (uint8_t *)dst;
p[0] = (uint8_t)(w >> 0);
p[1] = (uint8_t)(w >> 8);
p[2] = (uint8_t)(w >> 16);
p[3] = (uint8_t)(w >> 24);
}
static inline void store_cv_words(uint8_t bytes_out[32], uint32_t cv_words[8]) {
store32(&bytes_out[0 * 4], cv_words[0]);
store32(&bytes_out[1 * 4], cv_words[1]);
store32(&bytes_out[2 * 4], cv_words[2]);
store32(&bytes_out[3 * 4], cv_words[3]);
store32(&bytes_out[4 * 4], cv_words[4]);
store32(&bytes_out[5 * 4], cv_words[5]);
store32(&bytes_out[6 * 4], cv_words[6]);
store32(&bytes_out[7 * 4], cv_words[7]);
}
#ifdef __cplusplus
}
#endif
#endif /* BLAKE3_IMPL_H */
diff --git a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_x86-64.c b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_x86-64.c
index 5f2b69484dca..84f8331ab37c 100644
--- a/sys/contrib/openzfs/module/icp/algs/blake3/blake3_x86-64.c
+++ b/sys/contrib/openzfs/module/icp/algs/blake3/blake3_x86-64.c
@@ -1,250 +1,252 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#include "blake3_impl.h"
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
extern void zfs_blake3_compress_in_place_sse2(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags);
extern void zfs_blake3_compress_xof_sse2(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]);
extern void zfs_blake3_hash_many_sse2(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out);
static void blake3_compress_in_place_sse2(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags) {
kfpu_begin();
zfs_blake3_compress_in_place_sse2(cv, block, block_len, counter,
flags);
kfpu_end();
}
static void blake3_compress_xof_sse2(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]) {
kfpu_begin();
zfs_blake3_compress_xof_sse2(cv, block, block_len, counter, flags,
out);
kfpu_end();
}
static void blake3_hash_many_sse2(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
kfpu_begin();
zfs_blake3_hash_many_sse2(inputs, num_inputs, blocks, key, counter,
increment_counter, flags, flags_start, flags_end, out);
kfpu_end();
}
static boolean_t blake3_is_sse2_supported(void)
{
#if defined(__x86_64)
return (kfpu_allowed() && zfs_sse2_available());
#elif defined(__PPC64__) && defined(__linux__)
- /* TODO: implement vsx handler or FreeBSD */
return (kfpu_allowed() && zfs_vsx_available());
#else
return (kfpu_allowed());
#endif
}
-const blake3_impl_ops_t blake3_sse2_impl = {
+const blake3_ops_t blake3_sse2_impl = {
.compress_in_place = blake3_compress_in_place_sse2,
.compress_xof = blake3_compress_xof_sse2,
.hash_many = blake3_hash_many_sse2,
.is_supported = blake3_is_sse2_supported,
.degree = 4,
.name = "sse2"
};
#endif
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
extern void zfs_blake3_compress_in_place_sse41(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags);
extern void zfs_blake3_compress_xof_sse41(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]);
extern void zfs_blake3_hash_many_sse41(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out);
static void blake3_compress_in_place_sse41(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags) {
kfpu_begin();
zfs_blake3_compress_in_place_sse41(cv, block, block_len, counter,
flags);
kfpu_end();
}
static void blake3_compress_xof_sse41(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]) {
kfpu_begin();
zfs_blake3_compress_xof_sse41(cv, block, block_len, counter, flags,
out);
kfpu_end();
}
static void blake3_hash_many_sse41(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
kfpu_begin();
zfs_blake3_hash_many_sse41(inputs, num_inputs, blocks, key, counter,
increment_counter, flags, flags_start, flags_end, out);
kfpu_end();
}
static boolean_t blake3_is_sse41_supported(void)
{
#if defined(__x86_64)
return (kfpu_allowed() && zfs_sse4_1_available());
#elif defined(__PPC64__) && defined(__linux__)
- /* TODO: implement vsx handler or FreeBSD */
return (kfpu_allowed() && zfs_vsx_available());
#else
return (kfpu_allowed());
#endif
}
-const blake3_impl_ops_t blake3_sse41_impl = {
+const blake3_ops_t blake3_sse41_impl = {
.compress_in_place = blake3_compress_in_place_sse41,
.compress_xof = blake3_compress_xof_sse41,
.hash_many = blake3_hash_many_sse41,
.is_supported = blake3_is_sse41_supported,
.degree = 4,
.name = "sse41"
};
#endif
#if defined(__x86_64) && defined(HAVE_SSE4_1) && defined(HAVE_AVX2)
extern void zfs_blake3_hash_many_avx2(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out);
static void blake3_hash_many_avx2(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
kfpu_begin();
zfs_blake3_hash_many_avx2(inputs, num_inputs, blocks, key, counter,
increment_counter, flags, flags_start, flags_end, out);
kfpu_end();
}
static boolean_t blake3_is_avx2_supported(void)
{
return (kfpu_allowed() && zfs_sse4_1_available() &&
zfs_avx2_available());
}
-const blake3_impl_ops_t blake3_avx2_impl = {
+const blake3_ops_t blake3_avx2_impl = {
.compress_in_place = blake3_compress_in_place_sse41,
.compress_xof = blake3_compress_xof_sse41,
.hash_many = blake3_hash_many_avx2,
.is_supported = blake3_is_avx2_supported,
.degree = 8,
.name = "avx2"
};
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) && defined(HAVE_AVX512VL)
extern void zfs_blake3_compress_in_place_avx512(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags);
extern void zfs_blake3_compress_xof_avx512(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]);
extern void zfs_blake3_hash_many_avx512(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out);
static void blake3_compress_in_place_avx512(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags) {
kfpu_begin();
zfs_blake3_compress_in_place_avx512(cv, block, block_len, counter,
flags);
kfpu_end();
}
static void blake3_compress_xof_avx512(const uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags, uint8_t out[64]) {
kfpu_begin();
zfs_blake3_compress_xof_avx512(cv, block, block_len, counter, flags,
out);
kfpu_end();
}
static void blake3_hash_many_avx512(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
kfpu_begin();
zfs_blake3_hash_many_avx512(inputs, num_inputs, blocks, key, counter,
increment_counter, flags, flags_start, flags_end, out);
kfpu_end();
}
static boolean_t blake3_is_avx512_supported(void)
{
return (kfpu_allowed() && zfs_avx512f_available() &&
zfs_avx512vl_available());
}
-const blake3_impl_ops_t blake3_avx512_impl = {
+const blake3_ops_t blake3_avx512_impl = {
.compress_in_place = blake3_compress_in_place_avx512,
.compress_xof = blake3_compress_xof_avx512,
.hash_many = blake3_hash_many_avx512,
.is_supported = blake3_is_avx512_supported,
.degree = 16,
.name = "avx512"
};
#endif
diff --git a/sys/contrib/openzfs/module/icp/core/kcf_mech_tabs.c b/sys/contrib/openzfs/module/icp/core/kcf_mech_tabs.c
index 3d5063b28f6e..41705e84bc4b 100644
--- a/sys/contrib/openzfs/module/icp/core/kcf_mech_tabs.c
+++ b/sys/contrib/openzfs/module/icp/core/kcf_mech_tabs.c
@@ -1,435 +1,435 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
/* Cryptographic mechanisms tables and their access functions */
/*
* Internal numbers assigned to mechanisms are coded as follows:
*
* +----------------+----------------+
* | mech. class | mech. index |
* <--- 32-bits --->+<--- 32-bits --->
*
* the mech_class identifies the table the mechanism belongs to.
* mech_index is the index for that mechanism in the table.
* A mechanism belongs to exactly 1 table.
* The tables are:
* . digest_mechs_tab[] for the msg digest mechs.
* . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
* . mac_mechs_tab[] for MAC mechs.
* . sign_mechs_tab[] for sign & verify mechs.
* . keyops_mechs_tab[] for key/key pair generation, and key derivation.
* . misc_mechs_tab[] for mechs that don't belong to any of the above.
*
* There are no holes in the tables.
*/
/*
* Locking conventions:
* --------------------
* A mutex is associated with every entry of the tables.
* The mutex is acquired whenever the entry is accessed for
* 1) retrieving the mech_id (comparing the mech name)
* 2) finding a provider for an xxx_init() or atomic operation.
* 3) altering the mechs entry to add or remove a provider.
*
* In 2), after a provider is chosen, its prov_desc is held and the
* entry's mutex must be dropped. The provider's working function (SPI) is
* called outside the mech_entry's mutex.
*
* The number of providers for a particular mechanism is not expected to be
* large enough to justify the cost of using rwlocks, so the per-mechanism
* entry mutex won't be very *hot*.
*
*/
/* Mechanisms tables */
/* RFE 4687834 Will deal with the extensibility of these tables later */
static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];
const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
{0, NULL}, /* No class zero */
{KCF_MAXDIGEST, kcf_digest_mechs_tab},
{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
{KCF_MAXMAC, kcf_mac_mechs_tab},
};
static avl_tree_t kcf_mech_hash;
static int
kcf_mech_hash_compar(const void *lhs, const void *rhs)
{
const kcf_mech_entry_t *l = lhs, *r = rhs;
int cmp = strncmp(l->me_name, r->me_name, CRYPTO_MAX_MECH_NAME);
return ((0 < cmp) - (cmp < 0));
}
void
kcf_destroy_mech_tabs(void)
{
for (void *cookie = NULL; avl_destroy_nodes(&kcf_mech_hash, &cookie); )
;
avl_destroy(&kcf_mech_hash);
}
/*
* kcf_init_mech_tabs()
*
* Called by the misc/kcf's _init() routine to initialize the tables
* of mech_entry's.
*/
void
kcf_init_mech_tabs(void)
{
avl_create(&kcf_mech_hash, kcf_mech_hash_compar,
sizeof (kcf_mech_entry_t), offsetof(kcf_mech_entry_t, me_node));
}
/*
* kcf_create_mech_entry()
*
* Arguments:
* . The class of mechanism.
* . the name of the new mechanism.
*
* Description:
* Creates a new mech_entry for a mechanism not yet known to the
* framework.
* This routine is called by kcf_add_mech_provider, which is
* in turn invoked for each mechanism supported by a provider.
* The 'class' argument depends on the crypto_func_group_t bitmask
* in the registering provider's mech_info struct for this mechanism.
* When there is ambiguity in the mapping between the crypto_func_group_t
* and a class (dual ops, ...) the KCF_MISC_CLASS should be used.
*
* Context:
* User context only.
*
* Returns:
* KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or
* the mechname is bogus.
* KCF_MECH_TAB_FULL when there is no room left in the mech. tabs.
* KCF_SUCCESS otherwise.
*/
static int
kcf_create_mech_entry(kcf_ops_class_t class, const char *mechname)
{
if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS))
return (KCF_INVALID_MECH_CLASS);
if ((mechname == NULL) || (mechname[0] == 0))
return (KCF_INVALID_MECH_NAME);
/*
* First check if the mechanism is already in one of the tables.
* The mech_entry could be in another class.
*/
avl_index_t where = 0;
kcf_mech_entry_t tmptab;
strlcpy(tmptab.me_name, mechname, CRYPTO_MAX_MECH_NAME);
if (avl_find(&kcf_mech_hash, &tmptab, &where) != NULL)
return (KCF_SUCCESS);
/* Now take the next unused mech entry in the class's tab */
kcf_mech_entry_t *me_tab = kcf_mech_tabs_tab[class].met_tab;
int size = kcf_mech_tabs_tab[class].met_size;
for (int i = 0; i < size; ++i)
if (me_tab[i].me_name[0] == 0) {
/* Found an empty spot */
strlcpy(me_tab[i].me_name, mechname,
CRYPTO_MAX_MECH_NAME);
me_tab[i].me_mechid = KCF_MECHID(class, i);
/* Add the new mechanism to the hash table */
avl_insert(&kcf_mech_hash, &me_tab[i], where);
return (KCF_SUCCESS);
}
return (KCF_MECH_TAB_FULL);
}
/*
* kcf_add_mech_provider()
*
* Arguments:
* . An index into the provider mechanism array
* . A pointer to the provider descriptor
* . Storage for the kcf_prov_mech_desc_t entry that was added.
*
* Description:
* Adds a new provider of a mechanism to the mechanism's mech_entry
* chain.
*
* Context:
* User context only.
*
* Returns:
* KCF_SUCCESS on success,
* KCF_MECH_TAB_FULL otherwise.
*/
int
kcf_add_mech_provider(short mech_indx,
kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp)
{
int error;
kcf_mech_entry_t *mech_entry = NULL;
const crypto_mech_info_t *mech_info;
crypto_mech_type_t kcf_mech_type;
kcf_prov_mech_desc_t *prov_mech;
mech_info = &prov_desc->pd_mechanisms[mech_indx];
/*
* A mechanism belongs to exactly one mechanism table.
* Find the class corresponding to the function group flag of
* the mechanism.
*/
kcf_mech_type = crypto_mech2id(mech_info->cm_mech_name);
if (kcf_mech_type == CRYPTO_MECH_INVALID) {
crypto_func_group_t fg = mech_info->cm_func_group_mask;
kcf_ops_class_t class;
if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
class = KCF_DIGEST_CLASS;
else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
fg & CRYPTO_FG_DECRYPT_ATOMIC)
class = KCF_CIPHER_CLASS;
else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
class = KCF_MAC_CLASS;
else
__builtin_unreachable();
/*
* Attempt to create a new mech_entry for the specified
* mechanism. kcf_create_mech_entry() can handle the case
* where such an entry already exists.
*/
if ((error = kcf_create_mech_entry(class,
mech_info->cm_mech_name)) != KCF_SUCCESS) {
return (error);
}
/* get the KCF mech type that was assigned to the mechanism */
kcf_mech_type = crypto_mech2id(mech_info->cm_mech_name);
ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID);
}
error = kcf_get_mech_entry(kcf_mech_type, &mech_entry);
ASSERT(error == KCF_SUCCESS);
/* allocate and initialize new kcf_prov_mech_desc */
prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
memcpy(&prov_mech->pm_mech_info, mech_info,
sizeof (crypto_mech_info_t));
prov_mech->pm_prov_desc = prov_desc;
prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
[KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
/*
* Register the new kcf_prov_mech_desc as the provider for this
* mechanism, unless one is already registered.
*/
if (mech_entry->me_sw_prov != NULL) {
/*
* There is already a provider for this mechanism.
* Since we allow only one provider per mechanism,
* report this condition.
*/
cmn_err(CE_WARN, "The cryptographic provider "
"\"%s\" will not be used for %s. The provider "
"\"%s\" will be used for this mechanism "
"instead.", prov_desc->pd_description,
mech_info->cm_mech_name,
mech_entry->me_sw_prov->pm_prov_desc->
pd_description);
KCF_PROV_REFRELE(prov_desc);
kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
prov_mech = NULL;
} else {
/*
* Set the provider as the provider for
* this mechanism.
*/
mech_entry->me_sw_prov = prov_mech;
}
*pmdpp = prov_mech;
return (KCF_SUCCESS);
}
/*
* kcf_remove_mech_provider()
*
* Arguments:
* . mech_name: the name of the mechanism.
* . prov_desc: The provider descriptor
*
* Description:
* Removes a provider from the chain of provider descriptors.
* The provider is made unavailable to kernel consumers for the specified
* mechanism.
*
* Context:
* User context only.
*/
void
kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc)
{
crypto_mech_type_t mech_type;
kcf_prov_mech_desc_t *prov_mech = NULL;
kcf_mech_entry_t *mech_entry;
/* get the KCF mech type that was assigned to the mechanism */
if ((mech_type = crypto_mech2id(mech_name)) ==
CRYPTO_MECH_INVALID) {
/*
* Provider was not allowed for this mech due to policy or
* configuration.
*/
return;
}
/* get a ptr to the mech_entry that was created */
if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) {
/*
* Provider was not allowed for this mech due to policy or
* configuration.
*/
return;
}
if (mech_entry->me_sw_prov == NULL ||
mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
/* not the provider for this mechanism */
return;
}
prov_mech = mech_entry->me_sw_prov;
mech_entry->me_sw_prov = NULL;
/* free entry */
- KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
+ KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
}
/*
* kcf_get_mech_entry()
*
* Arguments:
* . The framework mechanism type
* . Storage for the mechanism entry
*
* Description:
* Retrieves the mechanism entry for the mech.
*
* Context:
* User and interrupt contexts.
*
* Returns:
* KCF_INVALID_MECH_NUMBER if the mechanism type is invalid.
* KCF_SUCCESS otherwise.
*/
int
kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
{
kcf_ops_class_t class;
int index;
const kcf_mech_entry_tab_t *me_tab;
ASSERT(mep != NULL);
class = KCF_MECH2CLASS(mech_type);
if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
/* the caller won't need to know it's an invalid class */
return (KCF_INVALID_MECH_NUMBER);
}
me_tab = &kcf_mech_tabs_tab[class];
index = KCF_MECH2INDEX(mech_type);
if ((index < 0) || (index >= me_tab->met_size)) {
return (KCF_INVALID_MECH_NUMBER);
}
*mep = &((me_tab->met_tab)[index]);
return (KCF_SUCCESS);
}
/*
* crypto_mech2id()
*
* Arguments:
* . mechname: A null-terminated string identifying the mechanism name.
*
* Description:
* Walks the mechanisms tables, looking for an entry that matches the
* mechname. Once it finds it, it builds the 64-bit mech_type and returns
* it.
*
* Context:
* Process and interrupt contexts.
*
* Returns:
* The unique mechanism identified by 'mechname', if found.
* CRYPTO_MECH_INVALID otherwise.
*/
/*
* Lookup the hash table for an entry that matches the mechname.
* If there are no providers for the mechanism,
* but there is an unloaded provider, this routine will attempt
* to load it.
*/
crypto_mech_type_t
crypto_mech2id(const char *mechname)
{
kcf_mech_entry_t tmptab, *found;
strlcpy(tmptab.me_name, mechname, CRYPTO_MAX_MECH_NAME);
if ((found = avl_find(&kcf_mech_hash, &tmptab, NULL))) {
ASSERT(found->me_mechid != CRYPTO_MECH_INVALID);
return (found->me_mechid);
}
return (CRYPTO_MECH_INVALID);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(crypto_mech2id);
#endif
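For orientation, a kernel consumer resolves a mechanism name to its 64-bit type with crypto_mech2id() and then fetches the table entry with kcf_get_mech_entry(); the class/index split follows the encoding described at the top of the file. A minimal sketch, not from the patch, assuming the icp headers are in scope (the function name and mechanism name are illustrative):

#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>

/* Illustrative consumer: resolve a mechanism name and fetch its entry. */
static int
example_lookup_mech(const char *name)
{
	crypto_mech_type_t t;
	kcf_mech_entry_t *me;

	t = crypto_mech2id(name);
	if (t == CRYPTO_MECH_INVALID)
		return (CRYPTO_MECHANISM_INVALID);

	/* Upper 32 bits select the class table, lower 32 bits the index. */
	kcf_ops_class_t class = KCF_MECH2CLASS(t);
	int indx = KCF_MECH2INDEX(t);
	(void) class; (void) indx;

	if (kcf_get_mech_entry(t, &me) != KCF_SUCCESS)
		return (CRYPTO_MECHANISM_INVALID);
	return (CRYPTO_SUCCESS);
}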
diff --git a/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c b/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
index 865d4e19c6eb..93af61a235d0 100644
--- a/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
+++ b/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
@@ -1,305 +1,305 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of the core Kernel Cryptographic Framework.
* It implements the management of tables of Providers. Entries are
* added and removed when cryptographic providers register with
* and unregister from the framework, respectively. The KCF scheduler
* and ioctl pseudo driver call these functions to obtain the list
* of available providers.
*
* The provider table is indexed by crypto_provider_id_t. Each
* element of the table contains a pointer to a provider descriptor,
* or NULL if the entry is free.
*
* This file also implements helper functions to allocate and free
* provider descriptors.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#define KCF_MAX_PROVIDERS 8 /* max number of providers */
/*
* Prov_tab is an array of providers which is updated when
* a crypto provider registers with kcf. The provider calls the
* SPI routine, crypto_register_provider(), which in turn calls
* kcf_prov_tab_add_provider().
*
* A provider unregisters by calling crypto_unregister_provider()
* which triggers the removal of the prov_tab entry.
* It also calls kcf_remove_mech_provider().
*
* prov_tab entries are not updated from kcf.conf or by cryptoadm(1M).
*/
static kcf_provider_desc_t *prov_tab[KCF_MAX_PROVIDERS];
static kmutex_t prov_tab_mutex; /* ensure exclusive access to the table */
static uint_t prov_tab_num = 0; /* number of providers in table */
void
kcf_prov_tab_destroy(void)
{
mutex_destroy(&prov_tab_mutex);
}
/*
* Initialize a mutex and the KCF providers table, prov_tab.
* The providers table is a static array of KCF_MAX_PROVIDERS entries.
* Called from kcf module _init().
*/
void
kcf_prov_tab_init(void)
{
mutex_init(&prov_tab_mutex, NULL, MUTEX_DEFAULT, NULL);
}
/*
* Add a provider to the provider table. If no free entry can be found
* for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, add
* the provider to the table, initialize the pd_prov_id field
* of the specified provider descriptor to the index in that table,
* and return CRYPTO_SUCCESS. Note that a REFHOLD is done on the
* provider when pointed to by a table entry.
*/
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
uint_t i;
mutex_enter(&prov_tab_mutex);
/* find free slot in providers table */
for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
;
if (i == KCF_MAX_PROVIDERS) {
/* ran out of providers entries */
mutex_exit(&prov_tab_mutex);
cmn_err(CE_WARN, "out of providers entries");
return (CRYPTO_HOST_MEMORY);
}
/* initialize entry */
prov_tab[i] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
prov_tab_num++;
mutex_exit(&prov_tab_mutex);
/* update provider descriptor */
prov_desc->pd_prov_id = i;
/*
* The KCF-private provider handle is defined as the internal
* provider id.
*/
prov_desc->pd_kcf_prov_handle =
(crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;
return (CRYPTO_SUCCESS);
}
/*
* Remove the provider specified by its id. A REFRELE is done on the
* corresponding provider descriptor before this function returns.
* Returns CRYPTO_INVALID_PROVIDER_ID if the provider id is not valid.
*/
int
kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
{
kcf_provider_desc_t *prov_desc;
/*
* Validate provider id, since it can be specified by a 3rd-party
* provider.
*/
mutex_enter(&prov_tab_mutex);
if (prov_id >= KCF_MAX_PROVIDERS ||
((prov_desc = prov_tab[prov_id]) == NULL)) {
mutex_exit(&prov_tab_mutex);
return (CRYPTO_INVALID_PROVIDER_ID);
}
mutex_exit(&prov_tab_mutex);
/*
* The provider id must remain valid until the associated provider
* descriptor is freed. For this reason, we simply release our
* reference to the descriptor here. When the reference count
* reaches zero, kcf_free_provider_desc() will be invoked and
* the associated entry in the providers table will be released
* at that time.
*/
- KCF_PROV_REFRELE(prov_desc);
KCF_PROV_IREFRELE(prov_desc);
+ KCF_PROV_REFRELE(prov_desc);
return (CRYPTO_SUCCESS);
}
/*
* Returns the provider descriptor corresponding to the specified
* provider id. A REFHOLD is done on the descriptor before it is
* returned to the caller. It is the responsibility of the caller
* to do a REFRELE once it is done with the provider descriptor.
*/
kcf_provider_desc_t *
kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
{
kcf_provider_desc_t *prov_desc;
mutex_enter(&prov_tab_mutex);
prov_desc = prov_tab[prov_id];
if (prov_desc == NULL) {
mutex_exit(&prov_tab_mutex);
return (NULL);
}
KCF_PROV_REFHOLD(prov_desc);
mutex_exit(&prov_tab_mutex);
return (prov_desc);
}
/*
* Allocate and initialize a provider descriptor.
* This function may sleep while allocating memory, which is OK
* since it is invoked from user context during provider registration.
*/
kcf_provider_desc_t *
kcf_alloc_provider_desc(void)
{
kcf_provider_desc_t *desc =
kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
for (int i = 0; i < KCF_OPS_CLASSSIZE; i++)
for (int j = 0; j < KCF_MAXMECHTAB; j++)
desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;
desc->pd_prov_id = KCF_PROVID_INVALID;
desc->pd_state = KCF_PROV_ALLOCATED;
mutex_init(&desc->pd_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&desc->pd_remove_cv, NULL, CV_DEFAULT, NULL);
return (desc);
}
/*
* Called by KCF_PROV_REFRELE when a provider's reference count drops
* to zero. We free the descriptor when the last reference is released.
* However, we do not free it when an unregister thread is waiting;
* in that case we signal that thread, and it is responsible for freeing
* the descriptor.
*/
void
kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
{
mutex_enter(&desc->pd_lock);
if (desc->pd_state == KCF_PROV_REMOVED ||
desc->pd_state == KCF_PROV_DISABLED) {
desc->pd_state = KCF_PROV_FREED;
cv_broadcast(&desc->pd_remove_cv);
mutex_exit(&desc->pd_lock);
return;
}
mutex_exit(&desc->pd_lock);
kcf_free_provider_desc(desc);
}
/*
* Free a provider descriptor.
*/
void
kcf_free_provider_desc(kcf_provider_desc_t *desc)
{
if (desc == NULL)
return;
mutex_enter(&prov_tab_mutex);
if (desc->pd_prov_id != KCF_PROVID_INVALID) {
/* release the associated providers table entry */
ASSERT(prov_tab[desc->pd_prov_id] != NULL);
prov_tab[desc->pd_prov_id] = NULL;
prov_tab_num--;
}
mutex_exit(&prov_tab_mutex);
/* free the kernel memory associated with the provider descriptor */
mutex_destroy(&desc->pd_lock);
cv_destroy(&desc->pd_remove_cv);
kmem_free(desc, sizeof (kcf_provider_desc_t));
}
/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the provider for the specified mechanism.
* The provider descriptor is returned held and it is the caller's
* responsibility to release it when done. The mechanism entry
* is returned if the optional argument mep is non-NULL.
*
* Returns one of the CRYPTO_ * error codes on failure, and
* CRYPTO_SUCCESS on success.
*/
int
kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
kcf_mech_entry_t **mep, boolean_t log_warn)
{
kcf_mech_entry_t *me;
/* get the mechanism entry for this mechanism */
if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
return (CRYPTO_MECHANISM_INVALID);
/* Get the provider for this mechanism. */
if (me->me_sw_prov == NULL ||
(*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
/* no provider for this mechanism */
if (log_warn)
cmn_err(CE_WARN, "no provider for \"%s\"\n",
me->me_name);
return (CRYPTO_MECH_NOT_SUPPORTED);
}
KCF_PROV_REFHOLD(*pd);
if (mep != NULL)
*mep = me;
return (CRYPTO_SUCCESS);
}
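The hold/release discipline these functions describe can be summarized in a short sketch: kcf_prov_tab_lookup() returns a held descriptor, and the caller must KCF_PROV_REFRELE() it when done. An illustrative consumer, not from the patch:

#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>

/* Illustrative consumer of a provider table entry. */
static int
example_use_provider(crypto_provider_id_t id)
{
	kcf_provider_desc_t *pd;

	pd = kcf_prov_tab_lookup(id);		/* returned held, or NULL */
	if (pd == NULL)
		return (CRYPTO_INVALID_PROVIDER_ID);

	/* ... dispatch work against pd ... */

	KCF_PROV_REFRELE(pd);			/* drop the hold taken by the lookup */
	return (CRYPTO_SUCCESS);
}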
diff --git a/sys/contrib/openzfs/module/icp/include/sys/crypto/impl.h b/sys/contrib/openzfs/module/icp/include/sys/crypto/impl.h
index 32ac43475a3e..4d17221ea9a3 100644
--- a/sys/contrib/openzfs/module/icp/include/sys/crypto/impl.h
+++ b/sys/contrib/openzfs/module/icp/include/sys/crypto/impl.h
@@ -1,390 +1,390 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_IMPL_H
#define _SYS_CRYPTO_IMPL_H
/*
* Kernel Cryptographic Framework private implementation definitions.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/avl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Prefixes convention: structures internal to the kernel cryptographic
* framework start with 'kcf_'. Exposed structure start with 'crypto_'.
*/
/*
* The following two macros should be
* #define KCF_OPS_CLASSSIZE (KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2)
* #define KCF_MAXMECHTAB KCF_MAXCIPHER
*
* However, doing that would involve reorganizing the header file a bit.
* When impl.h is broken up (bug# 4703218), this will be done. For now,
* we hardcode these values.
*/
#define KCF_OPS_CLASSSIZE 4
#define KCF_MAXMECHTAB 32
/*
* Valid values for the state of a provider. The order of
* the elements is important.
*
* Routines which get a provider or the list of providers
* should pick only those that are in KCF_PROV_READY state.
*/
typedef enum {
KCF_PROV_ALLOCATED = 1,
/*
* state < KCF_PROV_READY means the provider can not
* be used at all.
*/
KCF_PROV_READY,
/*
* state > KCF_PROV_READY means the provider can not
* be used for new requests.
*/
KCF_PROV_FAILED,
/*
* Threads setting the following two states should do so only
* if the current state < KCF_PROV_DISABLED.
*/
KCF_PROV_DISABLED,
KCF_PROV_REMOVED,
KCF_PROV_FREED
} kcf_prov_state_t;
#define KCF_IS_PROV_USABLE(pd) ((pd)->pd_state == KCF_PROV_READY)
#define KCF_IS_PROV_REMOVED(pd) ((pd)->pd_state >= KCF_PROV_REMOVED)
/*
* A provider descriptor structure. There is one such structure per
* provider. It is allocated and initialized at registration time and
* freed when the provider unregisters.
*
* pd_refcnt: Reference counter to this provider descriptor
* pd_irefcnt: References held by the framework internal structs
* pd_lock: lock protects pd_state
* pd_state: State value of the provider
* pd_ops_vector: The ops vector specified by Provider
* pd_mech_indx: Lookup table which maps a core framework mechanism
* number to an index in pd_mechanisms array
* pd_mechanisms: Array of mechanisms supported by the provider, specified
* by the provider during registration
* pd_mech_list_count: The number of entries in pd_mechanisms, specified
* by the provider during registration
* pd_remove_cv: cv to wait on while the provider queue drains
* pd_description: Provider description string
* pd_kcf_prov_handle: KCF-private handle assigned by KCF
* pd_prov_id: Identification # assigned by KCF to provider
*/
typedef struct kcf_provider_desc {
uint_t pd_refcnt;
uint_t pd_irefcnt;
kmutex_t pd_lock;
kcf_prov_state_t pd_state;
const crypto_ops_t *pd_ops_vector;
ushort_t pd_mech_indx[KCF_OPS_CLASSSIZE]\
[KCF_MAXMECHTAB];
const crypto_mech_info_t *pd_mechanisms;
uint_t pd_mech_list_count;
kcondvar_t pd_remove_cv;
const char *pd_description;
crypto_kcf_provider_handle_t pd_kcf_prov_handle;
crypto_provider_id_t pd_prov_id;
} kcf_provider_desc_t;
-/* atomic operations in linux implicitly form a memory barrier */
-#define membar_exit()
-
/*
* If a component has a reference to a kcf_provider_desc_t,
* it REFHOLD()s. A new provider descriptor which is referenced only
* by the providers table has a reference counter of one.
*/
-#define KCF_PROV_REFHOLD(desc) { \
- atomic_add_32(&(desc)->pd_refcnt, 1); \
- ASSERT((desc)->pd_refcnt != 0); \
+#define KCF_PROV_REFHOLD(desc) { \
+ int newval = atomic_add_32_nv(&(desc)->pd_refcnt, 1); \
+ ASSERT(newval != 0); \
}
-#define KCF_PROV_IREFHOLD(desc) { \
- atomic_add_32(&(desc)->pd_irefcnt, 1); \
- ASSERT((desc)->pd_irefcnt != 0); \
+#define KCF_PROV_IREFHOLD(desc) { \
+ int newval = atomic_add_32_nv(&(desc)->pd_irefcnt, 1); \
+ ASSERT(newval != 0); \
}
#define KCF_PROV_IREFRELE(desc) { \
- ASSERT((desc)->pd_irefcnt != 0); \
- membar_exit(); \
- if (atomic_add_32_nv(&(desc)->pd_irefcnt, -1) == 0) { \
+ membar_producer(); \
+ int newval = atomic_add_32_nv(&(desc)->pd_irefcnt, -1); \
+ ASSERT(newval != -1); \
+ if (newval == 0) { \
cv_broadcast(&(desc)->pd_remove_cv); \
} \
}
#define KCF_PROV_REFHELD(desc) ((desc)->pd_refcnt >= 1)
#define KCF_PROV_REFRELE(desc) { \
- ASSERT((desc)->pd_refcnt != 0); \
- membar_exit(); \
- if (atomic_add_32_nv(&(desc)->pd_refcnt, -1) == 0) { \
+ membar_producer(); \
+ int newval = atomic_add_32_nv(&(desc)->pd_refcnt, -1); \
+ ASSERT(newval != -1); \
+ if (newval == 0) { \
kcf_provider_zero_refcnt((desc)); \
} \
}
/*
* An element in a mechanism provider descriptors chain.
* The kcf_prov_mech_desc_t is duplicated in every chain the provider belongs
* to. This is a small tradeoff of memory vs. mutex spinning time to access the
* common provider field.
*/
typedef struct kcf_prov_mech_desc {
struct kcf_mech_entry *pm_me; /* Back to the head */
struct kcf_prov_mech_desc *pm_next; /* Next in the chain */
crypto_mech_info_t pm_mech_info; /* Provider mech info */
kcf_provider_desc_t *pm_prov_desc; /* Common desc. */
} kcf_prov_mech_desc_t;
/*
* A mechanism entry in an xxx_mech_tab[]. me_pad was deemed
* to be unnecessary and removed.
*/
typedef struct kcf_mech_entry {
crypto_mech_name_t me_name; /* mechanism name */
crypto_mech_type_t me_mechid; /* Internal id for mechanism */
kcf_prov_mech_desc_t *me_sw_prov; /* provider */
avl_node_t me_node;
} kcf_mech_entry_t;
/*
* If a component has a reference to a kcf_policy_desc_t,
* it REFHOLD()s. A new policy descriptor which is referenced only
* by the policy table has a reference count of one.
*/
-#define KCF_POLICY_REFHOLD(desc) { \
- atomic_add_32(&(desc)->pd_refcnt, 1); \
- ASSERT((desc)->pd_refcnt != 0); \
+#define KCF_POLICY_REFHOLD(desc) { \
+ int newval = atomic_add_32_nv(&(desc)->pd_refcnt, 1); \
+ ASSERT(newval != 0); \
}
/*
* Releases a reference to a policy descriptor. When the last
* reference is released, the descriptor is freed.
*/
#define KCF_POLICY_REFRELE(desc) { \
- ASSERT((desc)->pd_refcnt != 0); \
- membar_exit(); \
- if (atomic_add_32_nv(&(desc)->pd_refcnt, -1) == 0) \
+ membar_producer(); \
+ int newval = atomic_add_32_nv(&(desc)->pd_refcnt, -1); \
+ ASSERT(newval != -1); \
+ if (newval == 0) \
kcf_policy_free_desc(desc); \
}
/*
* Global tables. The sizes are from the predefined PKCS#11 v2.20 mechanisms,
* with a margin of a few extra empty entries
*/
#define KCF_MAXDIGEST 16 /* Digests */
#define KCF_MAXCIPHER 32 /* Ciphers */
#define KCF_MAXMAC 40 /* Message authentication codes */
_Static_assert(KCF_MAXCIPHER == KCF_MAXMECHTAB,
"KCF_MAXCIPHER != KCF_MAXMECHTAB"); /* See KCF_MAXMECHTAB comment */
typedef enum {
KCF_DIGEST_CLASS = 1,
KCF_CIPHER_CLASS,
KCF_MAC_CLASS,
} kcf_ops_class_t;
#define KCF_FIRST_OPSCLASS KCF_DIGEST_CLASS
#define KCF_LAST_OPSCLASS KCF_MAC_CLASS
_Static_assert(
KCF_OPS_CLASSSIZE == (KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2),
"KCF_OPS_CLASSSIZE doesn't match kcf_ops_class_t!");
/* The table of all the kcf_xxx_mech_tab[]s, indexed by kcf_ops_class */
typedef struct kcf_mech_entry_tab {
int met_size; /* Size of the met_tab[] */
kcf_mech_entry_t *met_tab; /* the table */
} kcf_mech_entry_tab_t;
extern const kcf_mech_entry_tab_t kcf_mech_tabs_tab[];
#define KCF_MECHID(class, index) \
(((crypto_mech_type_t)(class) << 32) | (crypto_mech_type_t)(index))
#define KCF_MECH2CLASS(mech_type) ((kcf_ops_class_t)((mech_type) >> 32))
#define KCF_MECH2INDEX(mech_type) ((int)((mech_type) & 0xFFFFFFFF))
#define KCF_TO_PROV_MECH_INDX(pd, mech_type) \
((pd)->pd_mech_indx[KCF_MECH2CLASS(mech_type)] \
[KCF_MECH2INDEX(mech_type)])
#define KCF_TO_PROV_MECHINFO(pd, mech_type) \
((pd)->pd_mechanisms[KCF_TO_PROV_MECH_INDX(pd, mech_type)])
#define KCF_TO_PROV_MECHNUM(pd, mech_type) \
(KCF_TO_PROV_MECHINFO(pd, mech_type).cm_mech_number)
/*
* Return codes for internal functions
*/
#define KCF_SUCCESS 0x0 /* Successful call */
#define KCF_INVALID_MECH_NUMBER 0x1 /* invalid mechanism number */
#define KCF_INVALID_MECH_NAME 0x2 /* invalid mechanism name */
#define KCF_INVALID_MECH_CLASS 0x3 /* invalid mechanism class */
#define KCF_MECH_TAB_FULL 0x4 /* Need more room in the mech tabs. */
#define KCF_INVALID_INDX ((ushort_t)-1)
/*
* Wrappers for ops vectors. In the wrapper definitions below, the pd
* argument always corresponds to a pointer to a provider descriptor
* of type kcf_provider_desc_t.
*/
#define KCF_PROV_DIGEST_OPS(pd) ((pd)->pd_ops_vector->co_digest_ops)
#define KCF_PROV_CIPHER_OPS(pd) ((pd)->pd_ops_vector->co_cipher_ops)
#define KCF_PROV_MAC_OPS(pd) ((pd)->pd_ops_vector->co_mac_ops)
#define KCF_PROV_CTX_OPS(pd) ((pd)->pd_ops_vector->co_ctx_ops)
/*
* Wrappers for crypto_digest_ops(9S) entry points.
*/
#define KCF_PROV_DIGEST_INIT(pd, ctx, mech) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_init) ? \
KCF_PROV_DIGEST_OPS(pd)->digest_init(ctx, mech) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_cipher_ops(9S) entry points.
*/
#define KCF_PROV_ENCRYPT_INIT(pd, ctx, mech, key, template) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_init) ? \
KCF_PROV_CIPHER_OPS(pd)->encrypt_init(ctx, mech, key, template) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_ATOMIC(pd, mech, key, plaintext, ciphertext, \
template) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic) ? \
KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic( \
mech, key, plaintext, ciphertext, template) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_ATOMIC(pd, mech, key, ciphertext, plaintext, \
template) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic) ? \
KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic( \
mech, key, ciphertext, plaintext, template) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_mac_ops(9S) entry points.
*/
#define KCF_PROV_MAC_INIT(pd, ctx, mech, key, template) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_init) ? \
KCF_PROV_MAC_OPS(pd)->mac_init(ctx, mech, key, template) \
: CRYPTO_NOT_SUPPORTED)
/*
* The _ (underscore) in _mac is needed to avoid replacing the
* function mac().
*/
#define KCF_PROV_MAC_UPDATE(pd, ctx, data) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_update) ? \
KCF_PROV_MAC_OPS(pd)->mac_update(ctx, data) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_FINAL(pd, ctx, mac) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_final) ? \
KCF_PROV_MAC_OPS(pd)->mac_final(ctx, mac) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_ATOMIC(pd, mech, key, data, mac, template) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_atomic) ? \
KCF_PROV_MAC_OPS(pd)->mac_atomic( \
mech, key, data, mac, template) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_ctx_ops(9S) entry points.
*/
#define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size) ( \
(KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->create_ctx_template) ? \
KCF_PROV_CTX_OPS(pd)->create_ctx_template( \
mech, key, template, size) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_FREE_CONTEXT(pd, ctx) ( \
(KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->free_context) ? \
KCF_PROV_CTX_OPS(pd)->free_context(ctx) : CRYPTO_NOT_SUPPORTED)
/* Miscellaneous */
extern void kcf_destroy_mech_tabs(void);
extern void kcf_init_mech_tabs(void);
extern int kcf_add_mech_provider(short, kcf_provider_desc_t *,
kcf_prov_mech_desc_t **);
extern void kcf_remove_mech_provider(const char *, kcf_provider_desc_t *);
extern int kcf_get_mech_entry(crypto_mech_type_t, kcf_mech_entry_t **);
extern kcf_provider_desc_t *kcf_alloc_provider_desc(void);
extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *);
extern void kcf_free_provider_desc(kcf_provider_desc_t *);
extern void undo_register_provider(kcf_provider_desc_t *, boolean_t);
extern int crypto_put_output_data(uchar_t *, crypto_data_t *, int);
extern int crypto_update_iov(void *, crypto_data_t *, crypto_data_t *,
int (*cipher)(void *, caddr_t, size_t, crypto_data_t *));
extern int crypto_update_uio(void *, crypto_data_t *, crypto_data_t *,
int (*cipher)(void *, caddr_t, size_t, crypto_data_t *));
/* Access to the provider's table */
extern void kcf_prov_tab_destroy(void);
extern void kcf_prov_tab_init(void);
extern int kcf_prov_tab_add_provider(kcf_provider_desc_t *);
extern int kcf_prov_tab_rem_provider(crypto_provider_id_t);
extern kcf_provider_desc_t *kcf_prov_tab_lookup(crypto_provider_id_t);
extern int kcf_get_sw_prov(crypto_mech_type_t, kcf_provider_desc_t **,
kcf_mech_entry_t **, boolean_t);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_IMPL_H */
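The reworked macros above all follow one pattern: assert on the value returned by atomic_add_32_nv() rather than re-reading the counter, and issue membar_producer() before the decrement so prior stores are visible before the count can reach zero. A self-contained sketch of that pattern, with hypothetical names, not from the patch:

#include <sys/zfs_context.h>	/* atomic_add_32_nv(), membar_producer(), ASSERT() */

/* Hypothetical refcounted object, mirroring the pd_refcnt handling above. */
typedef struct obj {
	volatile uint32_t o_refcnt;
} obj_t;

static inline void
obj_hold(obj_t *o)
{
	uint32_t newval = atomic_add_32_nv(&o->o_refcnt, 1);
	ASSERT(newval != 0);			/* zero only if the count wrapped */
}

static inline void
obj_rele(obj_t *o, void (*zero_cb)(obj_t *))
{
	membar_producer();			/* publish prior stores before the drop */
	uint32_t newval = atomic_add_32_nv(&o->o_refcnt, -1);
	ASSERT(newval != (uint32_t)-1);		/* catch a release below zero */
	if (newval == 0)
		zero_cb(o);			/* last reference: free or notify */
}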
diff --git a/sys/contrib/openzfs/module/icp/include/sys/crypto/sched_impl.h b/sys/contrib/openzfs/module/icp/include/sys/crypto/sched_impl.h
index 1989d5244e2b..355c1a87faa4 100644
--- a/sys/contrib/openzfs/module/icp/include/sys/crypto/sched_impl.h
+++ b/sys/contrib/openzfs/module/icp/include/sys/crypto/sched_impl.h
@@ -1,134 +1,135 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define _SYS_CRYPTO_SCHED_IMPL_H
/*
* Scheduler internal structures.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
typedef struct kcf_prov_tried {
kcf_provider_desc_t *pt_pd;
struct kcf_prov_tried *pt_next;
} kcf_prov_tried_t;
#define IS_FG_SUPPORTED(mdesc, fg) \
(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)
#define IS_PROVIDER_TRIED(pd, tlist) \
(tlist != NULL && is_in_triedlist(pd, tlist))
#define IS_RECOVERABLE(error) \
(error == CRYPTO_BUSY || \
error == CRYPTO_KEY_SIZE_RANGE)
/*
* Internal representation of a canonical context. We embed the crypto_ctx_t
* structure so that only one memory allocation is needed. The SPI
* ((crypto_ctx_t *)ctx)->cc_framework_private maps to this structure.
*/
typedef struct kcf_context {
crypto_ctx_t kc_glbl_ctx;
uint_t kc_refcnt;
kcf_provider_desc_t *kc_prov_desc; /* Prov. descriptor */
kcf_provider_desc_t *kc_sw_prov_desc; /* Prov. descriptor */
} kcf_context_t;
/*
* Decrement the reference count on the framework private context.
* When the last reference is released, the framework private
* context structure is freed along with the global context.
*/
#define KCF_CONTEXT_REFRELE(ictx) { \
- ASSERT((ictx)->kc_refcnt != 0); \
- membar_exit(); \
- if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0) \
+ membar_producer(); \
+ int newval = atomic_add_32_nv(&(ictx)->kc_refcnt, -1); \
+ ASSERT(newval != -1); \
+ if (newval == 0) \
kcf_free_context(ictx); \
}
/*
* Check if we can release the context now. In case of CRYPTO_BUSY,
* the client can retry the request using the context,
* so we do not release the context.
*
* This macro should be called only from the final routine in
* an init/update/final sequence. We do not release the context in case
* of update operations. We require the consumer to free it
* explicitly, in case it wants to abandon the operation. This is done
* as there may be mechanisms in ECB mode that can continue even if
* an operation on a block fails.
*/
#define KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) { \
if (KCF_CONTEXT_DONE(rv)) \
KCF_CONTEXT_REFRELE(kcf_ctx); \
}
/*
* This macro determines whether we're done with a context.
*/
#define KCF_CONTEXT_DONE(rv) \
((rv) != CRYPTO_BUSY && (rv) != CRYPTO_BUFFER_TOO_SMALL)
#define KCF_SET_PROVIDER_MECHNUM(fmtype, pd, mechp) \
(mechp)->cm_type = \
KCF_TO_PROV_MECHNUM(pd, fmtype);
/*
* A crypto_ctx_template_t is internally a pointer to this struct
*/
typedef struct kcf_ctx_template {
size_t ct_size; /* for freeing */
crypto_spi_ctx_template_t ct_prov_tmpl; /* context template */
/* from the provider */
} kcf_ctx_template_t;
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t);
extern crypto_ctx_t *kcf_new_ctx(kcf_provider_desc_t *);
extern void kcf_sched_destroy(void);
extern void kcf_sched_init(void);
extern void kcf_free_context(kcf_context_t *);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_SCHED_IMPL_H */
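For context, KCF_CONTEXT_COND_RELEASE() is intended for the final step of an init/update/final sequence: it releases the framework-private context only when the result is not retryable. A sketch of that shape, with illustrative names and not from the patch, assuming the headers above:

#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>

/*
 * Illustrative final routine: call the provider SPI, then release the
 * framework-private context unless the result says the caller may retry.
 */
static int
example_mac_final(crypto_context_t ctx, crypto_data_t *mac)
{
	crypto_ctx_t *c = (crypto_ctx_t *)ctx;
	kcf_context_t *kcf_ctx = (kcf_context_t *)c->cc_framework_private;
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	int rv;

	rv = KCF_PROV_MAC_FINAL(pd, c, mac);
	/* CRYPTO_BUSY / CRYPTO_BUFFER_TOO_SMALL keep the context alive. */
	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
	return (rv);
}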
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
index ca2bf884257f..30e96a889e0d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c
@@ -1,269 +1,260 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/counter.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
+#include <sys/arc_os.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/eventhandler.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/sdt.h>
#include <sys/aggsum.h>
#include <sys/vnode.h>
#include <cityhash.h>
#include <machine/vmparam.h>
#include <sys/vm.h>
#include <sys/vmmeter.h>
#if __FreeBSD_version >= 1300139
static struct sx arc_vnlru_lock;
static struct vnode *arc_vnlru_marker;
#endif
extern struct vfsops zfs_vfsops;
uint_t zfs_arc_free_target = 0;
static void
arc_free_target_init(void *unused __unused)
{
zfs_arc_free_target = vm_cnt.v_free_target;
}
SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
arc_free_target_init, NULL);
/*
* We don't have a tunable for arc_free_target due to the dependency on
* pagedaemon initialisation.
*/
-static int
-sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
-{
- uint_t val;
- int err;
-
- val = zfs_arc_free_target;
- err = sysctl_handle_int(oidp, &val, 0, req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- if (val < minfree)
- return (EINVAL);
- if (val > vm_cnt.v_page_count)
- return (EINVAL);
-
- zfs_arc_free_target = val;
-
- return (0);
-}
-SYSCTL_DECL(_vfs_zfs);
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
- CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof (uint_t),
- sysctl_vfs_zfs_arc_free_target, "IU",
+ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, free_target,
+ param_set_arc_free_target, 0, CTLFLAG_RW,
"Desired number of free pages below which ARC triggers reclaim");
+ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, no_grow_shift,
+ param_set_arc_no_grow_shift, 0, ZMOD_RW,
+ "log2(fraction of ARC which must be free to allow growing)");
int64_t
arc_available_memory(void)
{
int64_t lowest = INT64_MAX;
int64_t n __unused;
/*
* Cooperate with pagedaemon when it's time for it to scan
* and reclaim some pages.
*/
n = PAGESIZE * ((int64_t)freemem - zfs_arc_free_target);
if (n < lowest) {
lowest = n;
}
#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
/*
* If we're on an i386 platform, it's possible that we'll exhaust the
* kernel heap space before we ever run out of available physical
* memory. Most checks of the size of the heap_area compare against
* tune.t_minarmem, which is the minimum available real memory that we
* can have in the system. However, this is generally fixed at 25 pages
* which is so low that it's useless. In this comparison, we seek to
* calculate the total heap-size, and reclaim if more than 3/4ths of the
* heap is allocated. (Or, in the calculation, if less than 1/4th is
* free)
*/
n = uma_avail() - (long)(uma_limit() / 4);
if (n < lowest) {
lowest = n;
}
#endif
DTRACE_PROBE1(arc__available_memory, int64_t, lowest);
return (lowest);
}
/*
* Return a default max arc size based on the amount of physical memory.
*/
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
uint64_t size;
if (allmem >= 1 << 30)
size = allmem - (1 << 30);
else
size = min;
return (MAX(allmem * 5 / 8, size));
}
/*
* Helper function for arc_prune_async() it is responsible for safely
* handling the execution of a registered arc_prune_func_t.
*/
static void
arc_prune_task(void *arg)
{
int64_t nr_scan = (intptr_t)arg;
arc_reduce_target_size(ptob(nr_scan));
+
+#ifndef __ILP32__
+ if (nr_scan > INT_MAX)
+ nr_scan = INT_MAX;
+#endif
+
#if __FreeBSD_version >= 1300139
sx_xlock(&arc_vnlru_lock);
vnlru_free_vfsops(nr_scan, &zfs_vfsops, arc_vnlru_marker);
sx_xunlock(&arc_vnlru_lock);
#else
vnlru_free(nr_scan, &zfs_vfsops);
#endif
}
/*
* Notify registered consumers they must drop holds on a portion of the ARC
* buffers they reference. This provides a mechanism to ensure the ARC can
* honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
* is analogous to dnlc_reduce_cache() but more generic.
*
* This operation is performed asynchronously so it may be safely called
* in the context of the arc_reclaim_thread(). A reference is taken here
* for each registered arc_prune_t and the arc_prune_task() is responsible
* for releasing it once the registered arc_prune_func_t has completed.
*/
void
arc_prune_async(int64_t adjust)
{
#ifndef __LP64__
if (adjust > INTPTR_MAX)
adjust = INTPTR_MAX;
#endif
taskq_dispatch(arc_prune_taskq, arc_prune_task,
(void *)(intptr_t)adjust, TQ_SLEEP);
ARCSTAT_BUMP(arcstat_prune);
}
uint64_t
arc_all_memory(void)
{
return (ptob(physmem));
}
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
return (0);
}
uint64_t
arc_free_memory(void)
{
return (ptob(freemem));
}
static eventhandler_tag arc_event_lowmem = NULL;
static void
arc_lowmem(void *arg __unused, int howto __unused)
{
int64_t free_memory, to_free;
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
free_memory = arc_available_memory();
- to_free = (arc_c >> arc_shrink_shift) - MIN(free_memory, 0);
+ int64_t can_free = arc_c - arc_c_min;
+ if (can_free <= 0)
+ return;
+ to_free = (can_free >> arc_shrink_shift) - MIN(free_memory, 0);
DTRACE_PROBE2(arc__needfree, int64_t, free_memory, int64_t, to_free);
arc_reduce_target_size(to_free);
/*
* It is unsafe to block here in arbitrary threads, because we can come
* here from ARC itself and may hold ARC locks and thus risk a deadlock
* with ARC reclaim thread.
*/
if (curproc == pageproc)
arc_wait_for_eviction(to_free, B_FALSE);
}
void
arc_lowmem_init(void)
{
arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
EVENTHANDLER_PRI_FIRST);
#if __FreeBSD_version >= 1300139
arc_vnlru_marker = vnlru_alloc_marker();
sx_init(&arc_vnlru_lock, "arc vnlru lock");
#endif
}
void
arc_lowmem_fini(void)
{
if (arc_event_lowmem != NULL)
EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
#if __FreeBSD_version >= 1300139
if (arc_vnlru_marker != NULL) {
vnlru_free_marker(arc_vnlru_marker);
sx_destroy(&arc_vnlru_lock);
}
#endif
}
void
arc_register_hotplug(void)
{
}
void
arc_unregister_hotplug(void)
{
}
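The reworked lowmem handler first bounds the shrink by how far arc_c can drop toward arc_c_min and only then adds the memory deficit reported by arc_available_memory(). A standalone arithmetic sketch with made-up numbers, not from the patch:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	/* Hypothetical values, in bytes. */
	int64_t arc_c = 8LL << 30;		/* current ARC target: 8 GiB */
	int64_t arc_c_min = 1LL << 30;		/* floor: 1 GiB */
	int64_t free_memory = -(256LL << 20);	/* 256 MiB below the free target */
	int arc_shrink_shift = 7;		/* shrink 1/128 of the range per event */

	int64_t can_free = arc_c - arc_c_min;
	if (can_free <= 0)
		return (0);			/* already at the floor: nothing to free */

	/* 1/128 of the shrinkable range plus the reported deficit. */
	int64_t to_free = (can_free >> arc_shrink_shift) - MIN(free_memory, 0);
	printf("to_free = %jd bytes\n", (intmax_t)to_free);	/* 56 MiB + 256 MiB */
	return (0);
}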
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h b/sys/contrib/openzfs/module/os/freebsd/zfs/event_os.c
similarity index 59%
copy from sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
copy to sys/contrib/openzfs/module/os/freebsd/zfs/event_os.c
index 2be1b76e4334..97ac151e4fa1 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/disp.h
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/event_os.c
@@ -1,36 +1,65 @@
/*
- * Copyright (c) 2013 Andriy Gapon
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Rob Wing
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
- * $FreeBSD$
*/
-#ifndef _OPENSOLARIS_SYS_DISP_H_
-#define _OPENSOLARIS_SYS_DISP_H_
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/sx.h>
+#include <sys/event.h>
+
+#include <sys/freebsd_event.h>
+
+static void
+knlist_sx_xlock(void *arg)
+{
+
+ sx_xlock((struct sx *)arg);
+}
+
+static void
+knlist_sx_xunlock(void *arg)
+{
+
+ sx_xunlock((struct sx *)arg);
+}
+
+static void
+knlist_sx_assert_lock(void *arg, int what)
+{
-#include <sys/proc.h>
+ if (what == LA_LOCKED)
+ sx_assert((struct sx *)arg, SX_LOCKED);
+ else
+ sx_assert((struct sx *)arg, SX_UNLOCKED);
+}
-#define kpreempt(x) kern_yield(PRI_USER)
+void
+knlist_init_sx(struct knlist *knl, struct sx *lock)
+{
-#endif /* _OPENSOLARIS_SYS_DISP_H_ */
+ knlist_init(knl, lock, knlist_sx_xlock, knlist_sx_xunlock,
+ knlist_sx_assert_lock);
+}
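A minimal usage sketch for the new helper, not from the patch (names are illustrative, and the knlist_init_sx() prototype is assumed to come from sys/freebsd_event.h): initialize an sx lock, attach it to a knlist, and tear both down in reverse order.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/event.h>
#include <sys/freebsd_event.h>

static struct sx example_lock;
static struct knlist example_knl;

static void
example_init(void)
{
	sx_init(&example_lock, "example knlist lock");
	knlist_init_sx(&example_knl, &example_lock);
}

static void
example_fini(void)
{
	knlist_destroy(&example_knl);
	sx_destroy(&example_lock);
}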
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
index 2b808357ecc8..020ef6a39b5e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
@@ -1,332 +1,339 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_send.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_userhold.h>
#include <sys/errno.h>
#include <sys/eventhandler.h>
#include <sys/file.h>
#include <sys/fm/util.h>
#include <sys/fs/zfs.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/nvpair.h>
#include <sys/policy.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/vdev.h>
#include <sys/vdev_removal.h>
#include <sys/zap.h>
#include <sys/zcp.h>
#include <sys/zfeature.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ioctl_compat.h>
#include <sys/zfs_ioctl_impl.h>
#include <sys/zfs_onexit.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zio_checksum.h>
#include <sys/zone.h>
#include <sys/zvol.h>
#include "zfs_comutil.h"
#include "zfs_deleg.h"
#include "zfs_namecheck.h"
#include "zfs_prop.h"
SYSCTL_DECL(_vfs_zfs);
SYSCTL_DECL(_vfs_zfs_vdev);
extern uint_t rrw_tsd_key;
static int zfs_version_ioctl = ZFS_IOCVER_OZFS;
SYSCTL_DECL(_vfs_zfs_version);
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, ioctl, CTLFLAG_RD, &zfs_version_ioctl,
0, "ZFS_IOCTL_VERSION");
static struct cdev *zfsdev;
static struct root_hold_token *zfs_root_token;
extern uint_t rrw_tsd_key;
extern uint_t zfs_allow_log_key;
extern uint_t zfs_geom_probe_vdev_key;
static int zfs__init(void);
static int zfs__fini(void);
static void zfs_shutdown(void *, int);
static eventhandler_tag zfs_shutdown_event_tag;
#define ZFS_MIN_KSTACK_PAGES 4
static int
zfsdev_ioctl(struct cdev *dev, ulong_t zcmd, caddr_t arg, int flag,
struct thread *td)
{
uint_t len;
int vecnum;
zfs_iocparm_t *zp;
zfs_cmd_t *zc;
zfs_cmd_legacy_t *zcl;
int rc, error;
void *uaddr;
len = IOCPARM_LEN(zcmd);
vecnum = zcmd & 0xff;
zp = (void *)arg;
uaddr = (void *)zp->zfs_cmd;
error = 0;
zcl = NULL;
if (len != sizeof (zfs_iocparm_t)) {
printf("len %d vecnum: %d sizeof (zfs_cmd_t) %ju\n",
len, vecnum, (uintmax_t)sizeof (zfs_cmd_t));
return (EINVAL);
}
zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
/*
* Remap ioctl code for legacy user binaries
*/
if (zp->zfs_ioctl_version == ZFS_IOCVER_LEGACY) {
vecnum = zfs_ioctl_legacy_to_ozfs(vecnum);
if (vecnum < 0) {
kmem_free(zc, sizeof (zfs_cmd_t));
return (ENOTSUP);
}
zcl = kmem_zalloc(sizeof (zfs_cmd_legacy_t), KM_SLEEP);
if (copyin(uaddr, zcl, sizeof (zfs_cmd_legacy_t))) {
error = SET_ERROR(EFAULT);
goto out;
}
zfs_cmd_legacy_to_ozfs(zcl, zc);
} else if (copyin(uaddr, zc, sizeof (zfs_cmd_t))) {
error = SET_ERROR(EFAULT);
goto out;
}
error = zfsdev_ioctl_common(vecnum, zc, 0);
if (zcl) {
zfs_cmd_ozfs_to_legacy(zc, zcl);
rc = copyout(zcl, uaddr, sizeof (*zcl));
} else {
rc = copyout(zc, uaddr, sizeof (*zc));
}
if (error == 0 && rc != 0)
error = SET_ERROR(EFAULT);
out:
if (zcl)
kmem_free(zcl, sizeof (zfs_cmd_legacy_t));
kmem_free(zc, sizeof (zfs_cmd_t));
MPASS(tsd_get(rrw_tsd_key) == NULL);
return (error);
}
static void
zfsdev_close(void *data)
{
zfsdev_state_destroy(data);
}
void
zfsdev_private_set_state(void *priv __unused, zfsdev_state_t *zs)
{
devfs_set_cdevpriv(zs, zfsdev_close);
}
zfsdev_state_t *
zfsdev_private_get_state(void *priv)
{
return (priv);
}
static int
zfsdev_open(struct cdev *devp __unused, int flag __unused, int mode __unused,
struct thread *td __unused)
{
int error;
mutex_enter(&zfsdev_state_lock);
error = zfsdev_state_init(NULL);
mutex_exit(&zfsdev_state_lock);
return (error);
}
static struct cdevsw zfs_cdevsw = {
.d_version = D_VERSION,
.d_open = zfsdev_open,
.d_ioctl = zfsdev_ioctl,
.d_name = ZFS_DRIVER
};
int
zfsdev_attach(void)
{
- zfsdev = make_dev(&zfs_cdevsw, 0x0, UID_ROOT, GID_OPERATOR, 0666,
- ZFS_DRIVER);
- return (0);
+ struct make_dev_args args;
+
+ make_dev_args_init(&args);
+ args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ args.mda_devsw = &zfs_cdevsw;
+ args.mda_cr = NULL;
+ args.mda_uid = UID_ROOT;
+ args.mda_gid = GID_OPERATOR;
+ args.mda_mode = 0666;
+ return (make_dev_s(&args, &zfsdev, ZFS_DRIVER));
}
void
zfsdev_detach(void)
{
if (zfsdev != NULL)
destroy_dev(zfsdev);
}
int
zfs__init(void)
{
int error;
#if KSTACK_PAGES < ZFS_MIN_KSTACK_PAGES
printf("ZFS NOTICE: KSTACK_PAGES is %d which could result in stack "
"overflow panic!\nPlease consider adding "
"'options KSTACK_PAGES=%d' to your kernel config\n", KSTACK_PAGES,
ZFS_MIN_KSTACK_PAGES);
#endif
zfs_root_token = root_mount_hold("ZFS");
if ((error = zfs_kmod_init()) != 0) {
printf("ZFS: Failed to Load ZFS Filesystem"
", rc = %d\n", error);
root_mount_rel(zfs_root_token);
return (error);
}
tsd_create(&zfs_geom_probe_vdev_key, NULL);
printf("ZFS storage pool version: features support ("
SPA_VERSION_STRING ")\n");
root_mount_rel(zfs_root_token);
ddi_sysevent_init();
return (0);
}
int
zfs__fini(void)
{
if (zfs_busy() || zvol_busy() ||
zio_injection_enabled) {
return (EBUSY);
}
zfs_kmod_fini();
tsd_destroy(&zfs_geom_probe_vdev_key);
return (0);
}
static void
zfs_shutdown(void *arg __unused, int howto __unused)
{
/*
* ZFS fini routines cannot work properly in a panicked system.
*/
if (panicstr == NULL)
zfs__fini();
}
static int
zfs_modevent(module_t mod, int type, void *unused __unused)
{
int err;
switch (type) {
case MOD_LOAD:
err = zfs__init();
if (err == 0)
zfs_shutdown_event_tag = EVENTHANDLER_REGISTER(
shutdown_post_sync, zfs_shutdown, NULL,
SHUTDOWN_PRI_FIRST);
return (err);
case MOD_UNLOAD:
err = zfs__fini();
if (err == 0 && zfs_shutdown_event_tag != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync,
zfs_shutdown_event_tag);
return (err);
case MOD_SHUTDOWN:
return (0);
default:
break;
}
return (EOPNOTSUPP);
}
static moduledata_t zfs_mod = {
"zfsctrl",
zfs_modevent,
0
};
#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif
DECLARE_MODULE(zfsctrl, zfs_mod, SI_SUB_CLOCKS, SI_ORDER_ANY);
MODULE_VERSION(zfsctrl, 1);
#if __FreeBSD_version > 1300092
MODULE_DEPEND(zfsctrl, xdr, 1, 1, 1);
#else
MODULE_DEPEND(zfsctrl, krpc, 1, 1, 1);
#endif
MODULE_DEPEND(zfsctrl, acl_nfs4, 1, 1, 1);
MODULE_DEPEND(zfsctrl, crypto, 1, 1, 1);
MODULE_DEPEND(zfsctrl, zlib, 1, 1, 1);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/spa_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/spa_os.c
index 251fafcc964e..45ea10bb487d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/spa_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/spa_os.c
@@ -1,294 +1,293 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_os.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
-#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zvol.h>
#include <sys/abd.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
static nvlist_t *
spa_generate_rootconf(const char *name)
{
nvlist_t **configs, **tops;
nvlist_t *config;
nvlist_t *best_cfg, *nvtop, *nvroot;
uint64_t *holes;
uint64_t best_txg;
uint64_t nchildren;
uint64_t pgid;
uint64_t count;
uint64_t i;
uint_t nholes;
if (vdev_geom_read_pool_label(name, &configs, &count) != 0)
return (NULL);
ASSERT3U(count, !=, 0);
best_txg = 0;
for (i = 0; i < count; i++) {
uint64_t txg;
txg = fnvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG);
if (txg > best_txg) {
best_txg = txg;
best_cfg = configs[i];
}
}
nchildren = 1;
nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren);
holes = NULL;
nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY,
&holes, &nholes);
tops = kmem_zalloc(nchildren * sizeof (void *), KM_SLEEP);
for (i = 0; i < nchildren; i++) {
if (i >= count)
break;
if (configs[i] == NULL)
continue;
nvtop = fnvlist_lookup_nvlist(configs[i],
ZPOOL_CONFIG_VDEV_TREE);
tops[i] = fnvlist_dup(nvtop);
}
for (i = 0; holes != NULL && i < nholes; i++) {
if (i >= nchildren)
continue;
if (tops[holes[i]] != NULL)
continue;
tops[holes[i]] = fnvlist_alloc();
fnvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE);
fnvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, holes[i]);
fnvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 0);
}
for (i = 0; i < nchildren; i++) {
if (tops[i] != NULL)
continue;
tops[i] = fnvlist_alloc();
fnvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MISSING);
fnvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, i);
fnvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 0);
}
/*
* Create pool config based on the best vdev config.
*/
config = fnvlist_dup(best_cfg);
/*
* Put this pool's top-level vdevs into a root vdev.
*/
pgid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
nvroot = fnvlist_alloc();
fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL);
fnvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid);
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)tops, nchildren);
/*
* Replace the existing vdev_tree with the new root vdev in
* this pool's configuration (remove the old, add the new).
*/
fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
/*
* Drop vdev config elements that should not be present at pool level.
*/
fnvlist_remove(config, ZPOOL_CONFIG_GUID);
fnvlist_remove(config, ZPOOL_CONFIG_TOP_GUID);
for (i = 0; i < count; i++)
fnvlist_free(configs[i]);
kmem_free(configs, count * sizeof (void *));
for (i = 0; i < nchildren; i++)
fnvlist_free(tops[i]);
kmem_free(tops, nchildren * sizeof (void *));
fnvlist_free(nvroot);
return (config);
}
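/*
* Rough sketch of the nvlist built above (values illustrative): the best
* label's config is duplicated, its GUID and TOP_GUID entries are dropped,
* and ZPOOL_CONFIG_VDEV_TREE is replaced by a synthetic root vdev:
*   { type="root", id=0, guid=<pool guid>,
*     children=[ per-label top-level vdevs, with "hole" or "missing"
*                placeholders where no label was found ] }
*/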
int
spa_import_rootpool(const char *name, bool checkpointrewind)
{
spa_t *spa;
vdev_t *rvd;
nvlist_t *config, *nvtop;
char *pname;
int error;
/*
* Read the label from the boot device and generate a configuration.
*/
config = spa_generate_rootconf(name);
mutex_enter(&spa_namespace_lock);
if (config != NULL) {
pname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
VERIFY0(strcmp(name, pname));
if ((spa = spa_lookup(pname)) != NULL) {
/*
* The pool could already be imported,
* e.g., after reboot -r.
*/
if (spa->spa_state == POOL_STATE_ACTIVE) {
mutex_exit(&spa_namespace_lock);
fnvlist_free(config);
return (0);
}
/*
* Remove the existing root pool from the namespace so
* that we can replace it with the correct config
* we just read in.
*/
spa_remove(spa);
}
spa = spa_add(pname, config, NULL);
/*
* Set spa_ubsync.ub_version as it can be used in vdev_alloc()
* via spa_version().
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
} else if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
fnvlist_free(config);
cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
name);
return (EIO);
} else {
config = fnvlist_dup(spa->spa_config);
}
spa->spa_is_root = B_TRUE;
spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
if (checkpointrewind) {
spa->spa_import_flags |= ZFS_IMPORT_CHECKPOINT;
}
/*
* Build up a vdev tree based on the boot device's label config.
*/
nvtop = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
VDEV_ALLOC_ROOTPOOL);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error) {
mutex_exit(&spa_namespace_lock);
fnvlist_free(config);
cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
- pname);
+ name);
return (error);
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_free(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
mutex_exit(&spa_namespace_lock);
fnvlist_free(config);
return (0);
}
const char *
spa_history_zone(void)
{
return ("freebsd");
}
void
spa_import_os(spa_t *spa)
{
(void) spa;
}
void
spa_export_os(spa_t *spa)
{
(void) spa;
}
void
spa_activate_os(spa_t *spa)
{
(void) spa;
}
void
spa_deactivate_os(spa_t *spa)
{
(void) spa;
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index c774f05ff706..4d908381c40a 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -1,719 +1,878 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
+#include <sys/arc_os.h>
#include <sys/dmu.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/nvpair.h>
#include <sys/mount.h>
#include <sys/taskqueue.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zcp.h>
#include <sys/zio_checksum.h>
#include <sys/vdev_removal.h>
#include <sys/dsl_crypt.h>
#include <sys/zfs_ioctl_compat.h>
#include <sys/zfs_context.h>
#include <sys/arc_impl.h>
#include <sys/dsl_pool.h>
+#include <sys/vmmeter.h>
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, arc, CTLFLAG_RW, 0,
"ZFS adaptive replacement cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, condense, CTLFLAG_RW, 0, "ZFS condense");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf, CTLFLAG_RW, 0, "ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf_cache, CTLFLAG_RW, 0,
"ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, deadman, CTLFLAG_RW, 0, "ZFS deadman");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS dedup");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, l2arc, CTLFLAG_RW, 0, "ZFS l2arc");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, livelist, CTLFLAG_RW, 0, "ZFS livelist");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, lua, CTLFLAG_RW, 0, "ZFS lua");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, mg, CTLFLAG_RW, 0, "ZFS metaslab group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, multihost, CTLFLAG_RW, 0,
"ZFS multihost protection");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, prefetch, CTLFLAG_RW, 0, "ZFS prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, reconstruct, CTLFLAG_RW, 0, "ZFS reconstruct");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, recv, CTLFLAG_RW, 0, "ZFS receive");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, send, CTLFLAG_RW, 0, "ZFS send");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
SYSCTL_NODE(_vfs_zfs_livelist, OID_AUTO, condense, CTLFLAG_RW, 0,
"ZFS livelist condense");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, file, CTLFLAG_RW, 0, "ZFS VDEV file");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
"ZFS VDEV mirror");
SYSCTL_DECL(_vfs_zfs_version);
SYSCTL_CONST_STRING(_vfs_zfs_version, OID_AUTO, module, CTLFLAG_RD,
(ZFS_META_VERSION "-" ZFS_META_RELEASE), "OpenZFS module version");
-extern arc_state_t ARC_anon;
-extern arc_state_t ARC_mru;
-extern arc_state_t ARC_mru_ghost;
-extern arc_state_t ARC_mfu;
-extern arc_state_t ARC_mfu_ghost;
-extern arc_state_t ARC_l2c_only;
+/* arc.c */
-/*
- * minimum lifespan of a prefetch block in clock ticks
- * (initialized in arc_init())
- */
+int
+param_set_arc_long(SYSCTL_HANDLER_ARGS)
+{
+ int err;
-/* arc.c */
+ err = sysctl_handle_long(oidp, arg1, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ arc_tuning_update(B_TRUE);
+
+ return (0);
+}
+
+int
+param_set_arc_int(SYSCTL_HANDLER_ARGS)
+{
+ int err;
+
+ err = sysctl_handle_int(oidp, arg1, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ arc_tuning_update(B_TRUE);
+
+ return (0);
+}
int
param_set_arc_max(SYSCTL_HANDLER_ARGS)
{
- uint64_t val;
+ unsigned long val;
int err;
val = zfs_arc_max;
err = sysctl_handle_long(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min ||
val >= arc_all_memory()))
return (SET_ERROR(EINVAL));
zfs_arc_max = val;
arc_tuning_update(B_TRUE);
/* Update the sysctl to the tuned value */
if (val != 0)
zfs_arc_max = arc_c_max;
return (0);
}
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
+ CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ NULL, 0, param_set_arc_max, "LU",
+ "Maximum ARC size in bytes (LEGACY)");
+/* END CSTYLED */
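+/*
+ * Usage sketch for the legacy knob above (the value is only an example):
+ *   # sysctl vfs.zfs.arc_max=8589934592
+ * Non-zero values below MIN_ARC_MAX, at or below arc_c_min, or at or above
+ * arc_all_memory() are rejected; on success the sysctl reports back the
+ * tuned arc_c_max.
+ */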
+
int
param_set_arc_min(SYSCTL_HANDLER_ARGS)
{
- uint64_t val;
+ unsigned long val;
int err;
val = zfs_arc_min;
- err = sysctl_handle_64(oidp, &val, 0, req);
+ err = sysctl_handle_long(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max))
return (SET_ERROR(EINVAL));
zfs_arc_min = val;
arc_tuning_update(B_TRUE);
/* Update the sysctl to the tuned value */
if (val != 0)
zfs_arc_min = arc_c_min;
return (0);
}
-/* legacy compat */
-extern uint64_t l2arc_write_max; /* def max write size */
-extern uint64_t l2arc_write_boost; /* extra warmup write */
-extern uint64_t l2arc_headroom; /* # of dev writes */
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
+ CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ NULL, 0, param_set_arc_min, "LU",
+ "Minimum ARC size in bytes (LEGACY)");
+/* END CSTYLED */
+
+extern uint_t zfs_arc_free_target;
+
+int
+param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
+{
+ uint_t val;
+ int err;
+
+ val = zfs_arc_free_target;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ if (val < minfree)
+ return (EINVAL);
+ if (val > vm_cnt.v_page_count)
+ return (EINVAL);
+
+ zfs_arc_free_target = val;
+
+ return (0);
+}
+
+/*
+ * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
+ * pagedaemon initialization.
+ */
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ NULL, 0, param_set_arc_free_target, "IU",
+ "Desired number of free pages below which ARC triggers reclaim"
+ " (LEGACY)");
+/* END CSTYLED */
+
+int
+param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
+{
+ int err, val;
+
+ val = arc_no_grow_shift;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ if (val < 0 || val >= arc_shrink_shift)
+ return (EINVAL);
+
+ arc_no_grow_shift = val;
+
+ return (0);
+}
+
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ NULL, 0, param_set_arc_no_grow_shift, "I",
+ "log2(fraction of ARC which must be free to allow growing) (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_write_max;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
+ CTLFLAG_RWTUN, &l2arc_write_max, 0,
+ "Max write bytes per interval (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_write_boost;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
+ CTLFLAG_RWTUN, &l2arc_write_boost, 0,
+ "Extra write bytes during device warmup (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_headroom;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
+ CTLFLAG_RWTUN, &l2arc_headroom, 0,
+ "Number of max device writes to precache (LEGACY)");
+/* END CSTYLED */
+
extern uint64_t l2arc_headroom_boost;
-extern uint64_t l2arc_feed_secs; /* interval seconds */
-extern uint64_t l2arc_feed_min_ms; /* min interval msecs */
-extern int l2arc_noprefetch; /* don't cache prefetch bufs */
-extern int l2arc_feed_again; /* turbo warmup */
-extern int l2arc_norw; /* no reads during writes */
-
-/* BEGIN CSTYLED */
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
- &l2arc_write_max, 0, "max write size (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
- &l2arc_write_boost, 0, "extra write during warmup (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
- &l2arc_headroom, 0, "number of dev writes (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
- &l2arc_feed_secs, 0, "interval seconds (LEGACY)");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
- &l2arc_feed_min_ms, 0, "min interval milliseconds (LEGACY)");
-
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
- &l2arc_noprefetch, 0, "don't cache prefetch bufs (LEGACY)");
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
- &l2arc_feed_again, 0, "turbo warmup (LEGACY)");
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
- &l2arc_norw, 0, "no reads during writes (LEGACY)");
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
+ CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
+ "Compressed l2arc_headroom multiplier (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_feed_secs;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
+ CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
+ "Seconds between L2ARC writing (LEGACY)");
+/* END CSTYLED */
+
+extern uint64_t l2arc_feed_min_ms;
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
+ CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
+ "Min feed interval in milliseconds (LEGACY)");
+/* END CSTYLED */
+
+extern int l2arc_noprefetch;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
+ CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
+ "Skip caching prefetched buffers (LEGACY)");
+/* END CSTYLED */
+
+extern int l2arc_feed_again;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
+ CTLFLAG_RWTUN, &l2arc_feed_again, 0,
+ "Turbo L2ARC warmup (LEGACY)");
+/* END CSTYLED */
+
+extern int l2arc_norw;
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
+ CTLFLAG_RWTUN, &l2arc_norw, 0,
+ "No reads during writes (LEGACY)");
+/* END CSTYLED */
+
+extern arc_state_t ARC_anon;
+
+/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
&ARC_anon.arcs_size.rc_count, 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
&ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
&ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of anonymous state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mru;
+/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
&ARC_mru.arcs_size.rc_count, 0, "size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
&ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
&ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of data in mru state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mru_ghost;
+/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
&ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of metadata in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of data in mru ghost state");
+/* END CSTYLED */
+extern arc_state_t ARC_mfu;
+
+/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
&ARC_mfu.arcs_size.rc_count, 0, "size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
&ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of data in mfu state");
+/* END CSTYLED */
+
+extern arc_state_t ARC_mfu_ghost;
+/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of metadata in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of data in mfu ghost state");
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
- &ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state");
/* END CSTYLED */
-static int
-sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
-{
- int err, val;
-
- val = arc_no_grow_shift;
- err = sysctl_handle_int(oidp, &val, 0, req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- if (val < 0 || val >= arc_shrink_shift)
- return (EINVAL);
-
- arc_no_grow_shift = val;
- return (0);
-}
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, sizeof (int),
- sysctl_vfs_zfs_arc_no_grow_shift, "I",
- "log2(fraction of ARC which must be free to allow growing)");
-
-int
-param_set_arc_long(SYSCTL_HANDLER_ARGS)
-{
- int err;
-
- err = sysctl_handle_long(oidp, arg1, 0, req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- arc_tuning_update(B_TRUE);
-
- return (0);
-}
-
-int
-param_set_arc_int(SYSCTL_HANDLER_ARGS)
-{
- int err;
-
- err = sysctl_handle_int(oidp, arg1, 0, req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- arc_tuning_update(B_TRUE);
-
- return (0);
-}
+extern arc_state_t ARC_l2c_only;
/* BEGIN CSTYLED */
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
- CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_arc_min, sizeof (zfs_arc_min), param_set_arc_min, "LU",
- "min arc size (LEGACY)");
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
- CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_arc_max, sizeof (zfs_arc_max), param_set_arc_max, "LU",
- "max arc size (LEGACY)");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
+ &ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state");
/* END CSTYLED */
/* dbuf.c */
-
/* dmu.c */
/* dmu_zfetch.c */
+
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
-/* max bytes to prefetch per stream (default 8MB) */
extern uint32_t zfetch_max_distance;
-SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance, CTLFLAG_RWTUN,
- &zfetch_max_distance, 0, "Max bytes to prefetch per stream (LEGACY)");
-/* max bytes to prefetch indirects for per stream (default 64MB) */
+/* BEGIN CSTYLED */
+SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
+ CTLFLAG_RWTUN, &zfetch_max_distance, 0,
+ "Max bytes to prefetch per stream (LEGACY)");
+/* END CSTYLED */
+
extern uint32_t zfetch_max_idistance;
+
/* BEGIN CSTYLED */
-SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance, CTLFLAG_RWTUN,
- &zfetch_max_idistance, 0,
+SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
+ CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
"Max bytes to prefetch indirects for per stream (LEGACY)");
/* END CSTYLED */
/* dsl_pool.c */
/* dnode.c */
+
extern int zfs_default_bs;
+
+/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, default_bs, CTLFLAG_RWTUN,
&zfs_default_bs, 0, "Default dnode block shift");
+/* END CSTYLED */
extern int zfs_default_ibs;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, default_ibs, CTLFLAG_RWTUN,
- &zfs_default_ibs, 0, "Default dnode indirect block shift");
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, default_ibs, CTLFLAG_RWTUN,
+ &zfs_default_ibs, 0, "Default dnode indirect block shift");
+/* END CSTYLED */
/* dsl_scan.c */
/* metaslab.c */
-/* BEGIN CSTYLED */
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
extern int zfs_metaslab_sm_blksz_no_log;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log, CTLFLAG_RDTUN,
- &zfs_metaslab_sm_blksz_no_log, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
+ CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0,
"Block size for space map in pools with log space map disabled. "
"Power of 2 greater than 4096.");
+/* END CSTYLED */
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
extern int zfs_metaslab_sm_blksz_with_log;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log, CTLFLAG_RDTUN,
- &zfs_metaslab_sm_blksz_with_log, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
+ CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0,
"Block size for space map in pools with log space map enabled. "
"Power of 2 greater than 4096.");
+/* END CSTYLED */
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
extern int zfs_condense_pct;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
- &zfs_condense_pct, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct,
+ CTLFLAG_RWTUN, &zfs_condense_pct, 0,
"Condense on-disk spacemap when it is more than this many percents"
" of in-memory counterpart");
+/* END CSTYLED */
extern int zfs_remove_max_segment;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, remove_max_segment, CTLFLAG_RWTUN,
- &zfs_remove_max_segment, 0, "Largest contiguous segment ZFS will"
- " attempt to allocate when removing a device");
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, remove_max_segment,
+ CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
+ "Largest contiguous segment ZFS will attempt to allocate when removing"
+ " a device");
+/* END CSTYLED */
extern int zfs_removal_suspend_progress;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress, CTLFLAG_RWTUN,
- &zfs_removal_suspend_progress, 0,
- "Ensures certain actions can happen while in the middle of a removal");
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
+ CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
+ "Ensures certain actions can happen while in the middle of a removal");
+/* END CSTYLED */
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size, it switches to using a more
* aggressive strategy (i.e., search by size rather than offset).
*/
extern uint64_t metaslab_df_alloc_threshold;
-SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
- &metaslab_df_alloc_threshold, 0, "Minimum size which forces the dynamic"
- " allocator to change its allocation strategy");
+
+/* BEGIN CSTYLED */
+SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
+ CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0,
+ "Minimum size which forces the dynamic allocator to change its"
+ " allocation strategy");
+/* END CSTYLED */
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
extern int metaslab_df_free_pct;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
- &metaslab_df_free_pct, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
+ CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
"The minimum free space, in percent, which must be available in a"
" space map to continue allocations in a first-fit fashion");
+/* END CSTYLED */
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
extern int metaslab_load_pct;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
- &metaslab_load_pct, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct,
+ CTLFLAG_RWTUN, &metaslab_load_pct, 0,
"Percentage of cpus that can be used by the metaslab taskq");
+/* END CSTYLED */
/*
* Max number of metaslabs per group to preload.
*/
extern int metaslab_preload_limit;
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
- &metaslab_preload_limit, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit,
+ CTLFLAG_RWTUN, &metaslab_preload_limit, 0,
"Max number of metaslabs per group to preload");
+/* END CSTYLED */
+
+/* mmp.c */
+
+int
+param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
+{
+ int err;
+
+ err = sysctl_handle_long(oidp, &zfs_multihost_interval, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ if (spa_mode_global != SPA_MODE_UNINIT)
+ mmp_signal_all_threads();
+
+ return (0);
+}
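+/*
+ * Sketch: after a successful write the handler above signals all MMP
+ * threads (mmp_signal_all_threads()) so the new interval takes effect
+ * without waiting out the previous sleep, but only once the SPA subsystem
+ * has been initialized (spa_mode_global != SPA_MODE_UNINIT).
+ */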
/* spa.c */
+
extern int zfs_ccw_retry_interval;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval, CTLFLAG_RWTUN,
- &zfs_ccw_retry_interval, 0, "Configuration cache file write,"
- " retry after failure, interval (seconds)");
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval,
+ CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0,
+ "Configuration cache file write, retry after failure, interval"
+ " (seconds)");
+/* END CSTYLED */
extern uint64_t zfs_max_missing_tvds_cachefile;
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile, CTLFLAG_RWTUN,
- &zfs_max_missing_tvds_cachefile, 0,
- "allow importing pools with missing top-level vdevs in cache file");
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile,
+ CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0,
+ "Allow importing pools with missing top-level vdevs in cache file");
+/* END CSTYLED */
extern uint64_t zfs_max_missing_tvds_scan;
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan, CTLFLAG_RWTUN,
- &zfs_max_missing_tvds_scan, 0,
- "allow importing pools with missing top-level vdevs during scan");
+
+/* BEGIN CSTYLED */
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan,
+ CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0,
+ "Allow importing pools with missing top-level vdevs during scan");
/* END CSTYLED */
/* spa_misc.c */
+
extern int zfs_flags;
+
static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
int err, val;
val = zfs_flags;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
/*
* ZFS_DEBUG_MODIFY must be enabled prior to boot so all
* arc buffers in the system have the necessary additional
* checksum data. However, it is safe to disable at any
* time.
*/
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
val &= ~ZFS_DEBUG_MODIFY;
zfs_flags = val;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags,
CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, NULL, 0,
sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");
/* END CSTYLED */
int
param_set_deadman_synctime(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_deadman_synctime_ms;
err = sysctl_handle_long(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
zfs_deadman_synctime_ms = val;
spa_set_deadman_synctime(MSEC2NSEC(zfs_deadman_synctime_ms));
return (0);
}
int
param_set_deadman_ziotime(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_deadman_ziotime_ms;
err = sysctl_handle_long(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
zfs_deadman_ziotime_ms = val;
spa_set_deadman_ziotime(MSEC2NSEC(zfs_deadman_synctime_ms));
return (0);
}
int
param_set_deadman_failmode(SYSCTL_HANDLER_ARGS)
{
char buf[16];
int rc;
if (req->newptr == NULL)
strlcpy(buf, zfs_deadman_failmode, sizeof (buf));
rc = sysctl_handle_string(oidp, buf, sizeof (buf), req);
if (rc || req->newptr == NULL)
return (rc);
if (strcmp(buf, zfs_deadman_failmode) == 0)
return (0);
if (strcmp(buf, "wait") == 0)
zfs_deadman_failmode = "wait";
if (strcmp(buf, "continue") == 0)
zfs_deadman_failmode = "continue";
if (strcmp(buf, "panic") == 0)
zfs_deadman_failmode = "panic";
return (-param_set_deadman_failmode_common(buf));
}
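/*
* Example values accepted by the handler above: "wait", "continue" and
* "panic"; the string is then validated by
* param_set_deadman_failmode_common(), whose negated return value becomes
* the sysctl error.
*/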
+int
+param_set_slop_shift(SYSCTL_HANDLER_ARGS)
+{
+ int val;
+ int err;
+
+ val = spa_slop_shift;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ if (val < 1 || val > 31)
+ return (EINVAL);
+
+ spa_slop_shift = val;
+
+ return (0);
+}
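+/*
+ * Sketch: only shifts in the range 1..31 are accepted; e.g. a value of 5
+ * reserves roughly 1/32 of the pool as slop space, per the spa_slop_shift
+ * semantics in spa_misc.c.
+ */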
/* spacemap.c */
+
extern int space_map_ibs;
+
+/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
&space_map_ibs, 0, "Space map indirect block shift");
+/* END CSTYLED */
/* vdev.c */
+
int
param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
{
uint64_t val;
int err;
val = zfs_vdev_min_auto_ashift;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
return (SET_ERROR(EINVAL));
zfs_vdev_min_auto_ashift = val;
return (0);
}
+/* BEGIN CSTYLED */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
+ CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
+ param_set_min_auto_ashift, "QU",
+ "Min ashift used when creating new top-level vdev. (LEGACY)");
+/* END CSTYLED */
+
int
param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
uint64_t val;
int err;
val = zfs_vdev_max_auto_ashift;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
return (SET_ERROR(EINVAL));
zfs_vdev_max_auto_ashift = val;
return (0);
}
/* BEGIN CSTYLED */
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
- CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
- param_set_min_auto_ashift, "QU",
- "Min ashift used when creating new top-level vdev. (LEGACY)");
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
&zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
param_set_max_auto_ashift, "QU",
"Max ashift used when optimizing for logical -> physical sector size on"
" new top-level vdevs. (LEGACY)");
+/* END CSTYLED */
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
extern int zfs_vdev_dtl_sm_blksz;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz, CTLFLAG_RDTUN,
- &zfs_vdev_dtl_sm_blksz, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
+ CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0,
"Block size for DTL space map. Power of 2 greater than 4096.");
+/* END CSTYLED */
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
extern int zfs_vdev_standard_sm_blksz;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz, CTLFLAG_RDTUN,
- &zfs_vdev_standard_sm_blksz, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
+ CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
"Block size for standard space map. Power of 2 greater than 4096.");
/* END CSTYLED */
extern int vdev_validate_skip;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip, CTLFLAG_RDTUN,
- &vdev_validate_skip, 0, "Enable to bypass vdev_validate().");
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
+ CTLFLAG_RDTUN, &vdev_validate_skip, 0,
+ "Enable to bypass vdev_validate().");
+/* END CSTYLED */
/* vdev_cache.c */
/* vdev_mirror.c */
-/*
- * The load configuration settings below are tuned by default for
- * the case where all devices are of the same rotational type.
- *
- * If there is a mixture of rotating and non-rotating media, setting
- * non_rotating_seek_inc to 0 may well provide better results as it
- * will direct more reads to the non-rotating vdevs which are more
- * likely to have a higher performance.
- */
-
/* vdev_queue.c */
-/* BEGIN CSTYLED */
+
extern uint32_t zfs_vdev_max_active;
-SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RWTUN,
- &zfs_vdev_max_active, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
+ CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
"The maximum number of I/Os of all types active for each device."
" (LEGACY)");
+/* END CSTYLED */
extern int zfs_vdev_def_queue_depth;
-SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth, CTLFLAG_RWTUN,
- &zfs_vdev_def_queue_depth, 0,
+
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth,
+ CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0,
"Default queue depth for each allocator");
+/* END CSTYLED */
+/* zio.c */
-SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN,
- &zio_exclude_metadata, 0,
+/* BEGIN CSTYLED */
+SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
+ CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
"Exclude metadata buffers from dumps as well");
/* END CSTYLED */
-
-int
-param_set_slop_shift(SYSCTL_HANDLER_ARGS)
-{
- int val;
- int err;
-
- val = *(int *)arg1;
-
- err = sysctl_handle_int(oidp, &val, 0, req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- if (val < 1 || val > 31)
- return (EINVAL);
-
- *(int *)arg1 = val;
-
- return (0);
-}
-
-int
-param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
-{
- int err;
-
- err = sysctl_handle_long(oidp, arg1, 0, req);
- if (err != 0 || req->newptr == NULL)
- return (err);
-
- if (spa_mode_global != SPA_MODE_UNINIT)
- mmp_signal_all_threads();
-
- return (0);
-}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
index f3b4846f4e68..fef6a1b88e36 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
@@ -1,1327 +1,1326 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
*/
#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_os.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <vm/vm_page.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>
#ifndef g_topology_locked
#define g_topology_locked() sx_xlocked(&topology_lock)
#endif
/*
* Virtual device vector for GEOM.
*/
static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
.name = "ZFS::VDEV",
.version = G_VERSION,
.attrchanged = vdev_geom_attrchanged,
};
struct consumer_vdev_elem {
SLIST_ENTRY(consumer_vdev_elem) elems;
vdev_t *vd;
};
SLIST_HEAD(consumer_priv_t, consumer_vdev_elem);
_Static_assert(
sizeof (((struct g_consumer *)NULL)->private) ==
sizeof (struct consumer_priv_t *),
"consumer_priv_t* can't be stored in g_consumer.private");
DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
&vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
&vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
/* Declare local functions */
static void vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read);
/*
* Thread local storage used to indicate when a thread is probing geoms
* for their guids. If NULL, this thread is not tasting geoms. If non-NULL,
* it is looking for a replacement for the vdev_t* that is its value.
*/
uint_t zfs_geom_probe_vdev_key;
static void
vdev_geom_set_physpath(vdev_t *vd, struct g_consumer *cp,
boolean_t do_null_update)
{
boolean_t needs_update = B_FALSE;
char *physpath;
int error, physpath_len;
physpath_len = MAXPATHLEN;
physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
if (error == 0) {
char *old_physpath;
/* g_topology lock ensures that vdev has not been closed */
g_topology_assert();
old_physpath = vd->vdev_physpath;
vd->vdev_physpath = spa_strdup(physpath);
if (old_physpath != NULL) {
needs_update = (strcmp(old_physpath,
vd->vdev_physpath) != 0);
spa_strfree(old_physpath);
} else
needs_update = do_null_update;
}
g_free(physpath);
/*
* If the physical path changed, update the config.
* Only request an update for previously unset physpaths if
* requested by the caller.
*/
if (needs_update)
spa_async_request(vd->vdev_spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem;
priv = (struct consumer_priv_t *)&cp->private;
if (SLIST_EMPTY(priv))
return;
SLIST_FOREACH(elem, priv, elems) {
vdev_t *vd = elem->vd;
if (strcmp(attr, "GEOM::physpath") == 0) {
vdev_geom_set_physpath(vd, cp, /* null_update */B_TRUE);
return;
}
}
}
static void
vdev_geom_resize(struct g_consumer *cp)
{
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem;
spa_t *spa;
vdev_t *vd;
priv = (struct consumer_priv_t *)&cp->private;
if (SLIST_EMPTY(priv))
return;
SLIST_FOREACH(elem, priv, elems) {
vd = elem->vd;
if (vd->vdev_state != VDEV_STATE_HEALTHY)
continue;
spa = vd->vdev_spa;
if (!spa->spa_autoexpand)
continue;
vdev_online(spa, vd->vdev_guid, ZFS_ONLINE_EXPAND, NULL);
}
}
static void
vdev_geom_orphan(struct g_consumer *cp)
{
struct consumer_priv_t *priv;
// cppcheck-suppress uninitvar
struct consumer_vdev_elem *elem;
g_topology_assert();
priv = (struct consumer_priv_t *)&cp->private;
if (SLIST_EMPTY(priv))
/* Vdev close in progress. Ignore the event. */
return;
/*
* Orphan callbacks occur from the GEOM event thread.
* Concurrent with this call, new I/O requests may be
* working their way through GEOM about to find out
* (only once executed by the g_down thread) that we've
* been orphaned from our disk provider. These I/Os
* must be retired before we can detach our consumer.
* This is most easily achieved by acquiring the
* SPA ZIO configuration lock as a writer, but doing
* so with the GEOM topology lock held would cause
* a lock order reversal. Instead, rely on the SPA's
* async removal support to invoke a close on this
* vdev once it is safe to do so.
*/
SLIST_FOREACH(elem, priv, elems) {
// cppcheck-suppress uninitvar
vdev_t *vd = elem->vd;
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}
}
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd, boolean_t sanity)
{
struct g_geom *gp;
struct g_consumer *cp;
int error;
g_topology_assert();
ZFS_LOG(1, "Attaching to %s.", pp->name);
if (sanity) {
if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize)) {
ZFS_LOG(1, "Failing attach of %s. "
"Incompatible sectorsize %d\n",
pp->name, pp->sectorsize);
return (NULL);
} else if (pp->mediasize < SPA_MINDEVSIZE) {
ZFS_LOG(1, "Failing attach of %s. "
"Incompatible mediasize %ju\n",
pp->name, pp->mediasize);
return (NULL);
}
}
/* Do we have geom already? No? Create one. */
LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
if (gp->flags & G_GEOM_WITHER)
continue;
if (strcmp(gp->name, "zfs::vdev") != 0)
continue;
break;
}
if (gp == NULL) {
gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
gp->orphan = vdev_geom_orphan;
gp->attrchanged = vdev_geom_attrchanged;
gp->resize = vdev_geom_resize;
cp = g_new_consumer(gp);
error = g_attach(cp, pp);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
__LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
error = g_access(cp, 1, 0, 1);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
__LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
} else {
/* Check if we are already connected to this provider. */
LIST_FOREACH(cp, &gp->consumer, consumer) {
if (cp->provider == pp) {
ZFS_LOG(1, "Found consumer for %s.", pp->name);
break;
}
}
if (cp == NULL) {
cp = g_new_consumer(gp);
error = g_attach(cp, pp);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
__func__, __LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
error = g_access(cp, 1, 0, 1);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
__func__, __LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
ZFS_LOG(1, "Created consumer for %s.", pp->name);
} else {
error = g_access(cp, 1, 0, 1);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
__func__, __LINE__, error);
return (NULL);
}
ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
}
}
if (vd != NULL)
vd->vdev_tsd = cp;
cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
return (cp);
}
static void
vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read)
{
struct g_geom *gp;
g_topology_assert();
ZFS_LOG(1, "Detaching from %s.",
cp->provider && cp->provider->name ? cp->provider->name : "NULL");
gp = cp->geom;
if (open_for_read)
g_access(cp, -1, 0, -1);
/* Destroy consumer on last close. */
if (cp->acr == 0 && cp->ace == 0) {
if (cp->acw > 0)
g_access(cp, 0, -cp->acw, 0);
if (cp->provider != NULL) {
ZFS_LOG(1, "Destroying consumer for %s.",
cp->provider->name ? cp->provider->name : "NULL");
g_detach(cp);
}
g_destroy_consumer(cp);
}
/* Destroy geom if there are no consumers left. */
if (LIST_EMPTY(&gp->consumer)) {
ZFS_LOG(1, "Destroyed geom %s.", gp->name);
g_wither_geom(gp, ENXIO);
}
}
static void
vdev_geom_close_locked(vdev_t *vd)
{
struct g_consumer *cp;
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem, *elem_temp;
g_topology_assert();
cp = vd->vdev_tsd;
vd->vdev_delayed_close = B_FALSE;
if (cp == NULL)
return;
ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
KASSERT(cp->private != NULL, ("%s: cp->private is NULL", __func__));
priv = (struct consumer_priv_t *)&cp->private;
vd->vdev_tsd = NULL;
SLIST_FOREACH_SAFE(elem, priv, elems, elem_temp) {
if (elem->vd == vd) {
SLIST_REMOVE(priv, elem, consumer_vdev_elem, elems);
g_free(elem);
}
}
vdev_geom_detach(cp, B_TRUE);
}
/*
* Issue one or more bios to the vdev in parallel.
* cmds, datas, offsets, errors, and sizes are arrays of length ncmds. Each I/O
* operation is described by parallel entries from each array. There may be
* more bios actually issued than entries in the arrays.
*/
static void
vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
off_t *sizes, int *errors, int ncmds)
{
struct bio **bios;
uint8_t *p;
off_t off, maxio, s, end;
int i, n_bios, j;
size_t bios_size;
#if __FreeBSD_version > 1300130
maxio = maxphys - (maxphys % cp->provider->sectorsize);
#else
maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
#endif
n_bios = 0;
/* How many bios are required for all commands ? */
for (i = 0; i < ncmds; i++)
n_bios += (sizes[i] + maxio - 1) / maxio;
/* Allocate memory for the bios */
bios_size = n_bios * sizeof (struct bio *);
bios = kmem_zalloc(bios_size, KM_SLEEP);
/* Prepare and issue all of the bios */
for (i = j = 0; i < ncmds; i++) {
off = offsets[i];
p = datas[i];
s = sizes[i];
end = off + s;
ASSERT0(off % cp->provider->sectorsize);
ASSERT0(s % cp->provider->sectorsize);
for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
bios[j] = g_alloc_bio();
bios[j]->bio_cmd = cmds[i];
bios[j]->bio_done = NULL;
bios[j]->bio_offset = off;
bios[j]->bio_length = MIN(s, maxio);
bios[j]->bio_data = (caddr_t)p;
g_io_request(bios[j], cp);
}
}
ASSERT3S(j, ==, n_bios);
/* Wait for all of the bios to complete, and clean them up */
for (i = j = 0; i < ncmds; i++) {
off = offsets[i];
s = sizes[i];
end = off + s;
for (; off < end; off += maxio, s -= maxio, j++) {
errors[i] = biowait(bios[j], "vdev_geom_io") ||
errors[i];
g_destroy_bio(bios[j]);
}
}
kmem_free(bios, bios_size);
}
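/*
* Worked example (assuming a maxio of 128 KiB): a single 1 MiB BIO_READ
* command is split into 8 consecutive bios, and errors[i] for that command
* ends up nonzero if any of its bios failed in biowait().
*/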
/*
* Read the vdev config from a device. Return the number of valid labels that
* were found. The vdev config will be returned in config if and only if at
* least one valid label was found.
*/
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
struct g_provider *pp;
nvlist_t *config;
vdev_phys_t *vdev_lists[VDEV_LABELS];
char *buf;
size_t buflen;
uint64_t psize, state, txg;
off_t offsets[VDEV_LABELS];
off_t size;
off_t sizes[VDEV_LABELS];
int cmds[VDEV_LABELS];
int errors[VDEV_LABELS];
int l, nlabels;
g_topology_assert_not();
pp = cp->provider;
ZFS_LOG(1, "Reading config from %s...", pp->name);
psize = pp->mediasize;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
size = sizeof (*vdev_lists[0]) + pp->sectorsize -
((sizeof (*vdev_lists[0]) - 1) % pp->sectorsize) - 1;
buflen = sizeof (vdev_lists[0]->vp_nvlist);
/* Create all of the IO requests */
for (l = 0; l < VDEV_LABELS; l++) {
cmds[l] = BIO_READ;
vdev_lists[l] = kmem_alloc(size, KM_SLEEP);
offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
sizes[l] = size;
errors[l] = 0;
ASSERT0(offsets[l] % pp->sectorsize);
}
/* Issue the IO requests */
vdev_geom_io(cp, cmds, (void**)vdev_lists, offsets, sizes, errors,
VDEV_LABELS);
/* Parse the labels */
config = *configp = NULL;
nlabels = 0;
for (l = 0; l < VDEV_LABELS; l++) {
if (errors[l] != 0)
continue;
buf = vdev_lists[l]->vp_nvlist;
if (nvlist_unpack(buf, buflen, &config, 0) != 0)
continue;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(config);
continue;
}
if (state != POOL_STATE_SPARE &&
state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(config);
continue;
}
if (*configp != NULL)
nvlist_free(*configp);
*configp = config;
nlabels++;
}
/* Free the label storage */
for (l = 0; l < VDEV_LABELS; l++)
kmem_free(vdev_lists[l], size);
return (nlabels);
}
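/*
* Sketch of the contract: the return value is how many of the four on-disk
* labels unpacked into a plausible config (valid pool state, and a nonzero
* txg unless the device is a spare or L2ARC); *configp keeps the last such
* config parsed.
*/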
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
nvlist_t **new_configs;
uint64_t i;
if (id < *count)
return;
new_configs = kmem_zalloc((id + 1) * sizeof (nvlist_t *),
KM_SLEEP);
for (i = 0; i < *count; i++)
new_configs[i] = (*configs)[i];
if (*configs != NULL)
kmem_free(*configs, *count * sizeof (void *));
*configs = new_configs;
*count = id + 1;
}
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
const char *name, uint64_t *known_pool_guid)
{
nvlist_t *vdev_tree;
uint64_t pool_guid;
uint64_t vdev_guid;
uint64_t id, txg, known_txg;
char *pname;
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
strcmp(pname, name) != 0)
goto ignore;
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
goto ignore;
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
goto ignore;
if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
goto ignore;
if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
goto ignore;
txg = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG);
if (*known_pool_guid != 0) {
if (pool_guid != *known_pool_guid)
goto ignore;
} else
*known_pool_guid = pool_guid;
resize_configs(configs, count, id);
if ((*configs)[id] != NULL) {
known_txg = fnvlist_lookup_uint64((*configs)[id],
ZPOOL_CONFIG_POOL_TXG);
if (txg <= known_txg)
goto ignore;
nvlist_free((*configs)[id]);
}
(*configs)[id] = cfg;
return;
ignore:
nvlist_free(cfg);
}
int
vdev_geom_read_pool_label(const char *name,
nvlist_t ***configs, uint64_t *count)
{
struct g_class *mp;
struct g_geom *gp;
struct g_provider *pp;
struct g_consumer *zcp;
nvlist_t *vdev_cfg;
uint64_t pool_guid;
int nlabels;
DROP_GIANT();
g_topology_lock();
*configs = NULL;
*count = 0;
pool_guid = 0;
LIST_FOREACH(mp, &g_classes, class) {
if (mp == &zfs_vdev_class)
continue;
LIST_FOREACH(gp, &mp->geom, geom) {
if (gp->flags & G_GEOM_WITHER)
continue;
LIST_FOREACH(pp, &gp->provider, provider) {
if (pp->flags & G_PF_WITHER)
continue;
zcp = vdev_geom_attach(pp, NULL, B_TRUE);
if (zcp == NULL)
continue;
g_topology_unlock();
nlabels = vdev_geom_read_config(zcp, &vdev_cfg);
g_topology_lock();
vdev_geom_detach(zcp, B_TRUE);
if (nlabels == 0)
continue;
ZFS_LOG(1, "successfully read vdev config");
process_vdev_config(configs, count,
vdev_cfg, name, &pool_guid);
}
}
}
g_topology_unlock();
PICKUP_GIANT();
return (*count > 0 ? 0 : ENOENT);
}
enum match {
NO_MATCH = 0, /* No matching labels found */
TOPGUID_MATCH = 1, /* Labels match top guid, not vdev guid */
ZERO_MATCH = 1, /* Should never be returned */
ONE_MATCH = 2, /* 1 label matching the vdev_guid */
TWO_MATCH = 3, /* 2 labels matching the vdev_guid */
THREE_MATCH = 4, /* 3 labels matching the vdev_guid */
FULL_MATCH = 5 /* all labels match the vdev_guid */
};
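/*
* Example: for a vdev-guid match, vdev_attach_ok() below returns
* ZERO_MATCH + nlabels, so a device with all four labels readable yields
* FULL_MATCH (5) and a single readable label yields ONE_MATCH (2), while a
* top-guid-only match yields TOPGUID_MATCH (1).
*/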
static enum match
vdev_attach_ok(vdev_t *vd, struct g_provider *pp)
{
nvlist_t *config;
uint64_t pool_guid, top_guid, vdev_guid;
struct g_consumer *cp;
int nlabels;
cp = vdev_geom_attach(pp, NULL, B_TRUE);
if (cp == NULL) {
ZFS_LOG(1, "Unable to attach tasting instance to %s.",
pp->name);
return (NO_MATCH);
}
g_topology_unlock();
nlabels = vdev_geom_read_config(cp, &config);
g_topology_lock();
vdev_geom_detach(cp, B_TRUE);
if (nlabels == 0) {
ZFS_LOG(1, "Unable to read config from %s.", pp->name);
return (NO_MATCH);
}
pool_guid = 0;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
top_guid = 0;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, &top_guid);
vdev_guid = 0;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
nvlist_free(config);
/*
* Check that the label's pool guid matches the desired guid.
* Inactive spares and L2ARCs do not have any pool guid in the label.
*/
if (pool_guid != 0 && pool_guid != spa_guid(vd->vdev_spa)) {
ZFS_LOG(1, "pool guid mismatch for provider %s: %ju != %ju.",
pp->name,
(uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)pool_guid);
return (NO_MATCH);
}
/*
* Check that the label's vdev guid matches the desired guid.
* The second condition handles a possible race on vdev detach, when the
* remaining vdev receives the GUID of a destroyed top-level mirror vdev.
*/
if (vdev_guid == vd->vdev_guid) {
ZFS_LOG(1, "guids match for provider %s.", pp->name);
return (ZERO_MATCH + nlabels);
} else if (top_guid == vd->vdev_guid && vd == vd->vdev_top) {
ZFS_LOG(1, "top vdev guid match for provider %s.", pp->name);
return (TOPGUID_MATCH);
}
ZFS_LOG(1, "vdev guid mismatch for provider %s: %ju != %ju.",
pp->name, (uintmax_t)vd->vdev_guid, (uintmax_t)vdev_guid);
return (NO_MATCH);
}
static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
struct g_class *mp;
struct g_geom *gp;
struct g_provider *pp, *best_pp;
struct g_consumer *cp;
const char *vdpath;
enum match match, best_match;
g_topology_assert();
vdpath = vd->vdev_path + sizeof ("/dev/") - 1;
cp = NULL;
best_pp = NULL;
best_match = NO_MATCH;
LIST_FOREACH(mp, &g_classes, class) {
if (mp == &zfs_vdev_class)
continue;
LIST_FOREACH(gp, &mp->geom, geom) {
if (gp->flags & G_GEOM_WITHER)
continue;
LIST_FOREACH(pp, &gp->provider, provider) {
match = vdev_attach_ok(vd, pp);
if (match > best_match) {
best_match = match;
best_pp = pp;
} else if (match == best_match) {
if (strcmp(pp->name, vdpath) == 0) {
best_pp = pp;
}
}
if (match == FULL_MATCH)
goto out;
}
}
}
out:
if (best_pp) {
cp = vdev_geom_attach(best_pp, vd, B_TRUE);
if (cp == NULL) {
printf("ZFS WARNING: Unable to attach to %s.\n",
best_pp->name);
}
}
return (cp);
}
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
struct g_consumer *cp;
char *buf;
size_t len;
g_topology_assert();
ZFS_LOG(1, "Searching by guids [%ju:%ju].",
(uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
cp = vdev_geom_attach_by_guids(vd);
if (cp != NULL) {
len = strlen(cp->provider->name) + strlen("/dev/") + 1;
buf = kmem_alloc(len, KM_SLEEP);
snprintf(buf, len, "/dev/%s", cp->provider->name);
spa_strfree(vd->vdev_path);
vd->vdev_path = buf;
ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
(uintmax_t)spa_guid(vd->vdev_spa),
(uintmax_t)vd->vdev_guid, cp->provider->name);
} else {
ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
(uintmax_t)spa_guid(vd->vdev_spa),
(uintmax_t)vd->vdev_guid);
}
return (cp);
}
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
struct g_provider *pp;
struct g_consumer *cp;
g_topology_assert();
cp = NULL;
pp = g_provider_by_name(vd->vdev_path + sizeof ("/dev/") - 1);
if (pp != NULL) {
ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
if (!check_guid || vdev_attach_ok(vd, pp) == FULL_MATCH)
cp = vdev_geom_attach(pp, vd, B_FALSE);
}
return (cp);
}
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
struct g_provider *pp;
struct g_consumer *cp;
int error, has_trim;
uint16_t rate;
/*
* Set the TLS to indicate downstack that we
* should not access zvols
*/
VERIFY0(tsd_set(zfs_geom_probe_vdev_key, vd));
/*
* We must have a pathname, and it must be absolute.
*/
if (vd->vdev_path == NULL || strncmp(vd->vdev_path, "/dev/", 5) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
}
/*
* Reopen the device if it's not currently open. Otherwise,
* just update the physical size of the device.
*/
if ((cp = vd->vdev_tsd) != NULL) {
ASSERT(vd->vdev_reopening);
goto skip_open;
}
DROP_GIANT();
g_topology_lock();
error = 0;
if (vd->vdev_spa->spa_is_splitting ||
((vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
(vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)))) {
/*
* We are dealing with a vdev that hasn't been previously
* opened (since boot), and we are not loading an
* existing pool configuration. This looks like a
* vdev add operation to a new or existing pool.
* Assume the user really wants to do this, and find
* GEOM provider by its name, ignoring GUID mismatches.
*
* XXPOLICY: It would be safer to only allow a device
* that is unlabeled or labeled but missing
* GUID information to be opened in this fashion,
* unless we are doing a split, in which case we
* should allow any guid.
*/
cp = vdev_geom_open_by_path(vd, 0);
} else {
/*
* Try using the recorded path for this device, but only
* accept it if its label data contains the expected GUIDs.
*/
cp = vdev_geom_open_by_path(vd, 1);
if (cp == NULL) {
/*
* The device at vd->vdev_path doesn't have the
* expected GUIDs. The disks might have merely
* moved around so try all other GEOM providers
* to find one with the right GUIDs.
*/
cp = vdev_geom_open_by_guids(vd);
}
}
/* Clear the TLS now that tasting is done */
VERIFY0(tsd_set(zfs_geom_probe_vdev_key, NULL));
if (cp == NULL) {
ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
error = ENOENT;
} else {
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem;
int spamode;
priv = (struct consumer_priv_t *)&cp->private;
if (cp->private == NULL)
SLIST_INIT(priv);
elem = g_malloc(sizeof (*elem), M_WAITOK|M_ZERO);
elem->vd = vd;
SLIST_INSERT_HEAD(priv, elem, elems);
spamode = spa_mode(vd->vdev_spa);
if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
!ISP2(cp->provider->sectorsize)) {
ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
cp->provider->name);
vdev_geom_close_locked(vd);
error = EINVAL;
cp = NULL;
} else if (cp->acw == 0 && (spamode & FWRITE) != 0) {
int i;
for (i = 0; i < 5; i++) {
error = g_access(cp, 0, 1, 0);
if (error == 0)
break;
g_topology_unlock();
tsleep(vd, 0, "vdev", hz / 2);
g_topology_lock();
}
if (error != 0) {
printf("ZFS WARNING: Unable to open %s for "
"writing (error=%d).\n",
cp->provider->name, error);
vdev_geom_close_locked(vd);
cp = NULL;
}
}
}
/* Fetch initial physical path information for this device. */
if (cp != NULL) {
vdev_geom_attrchanged(cp, "GEOM::physpath");
/* Set other GEOM characteristics */
vdev_geom_set_physpath(vd, cp, /* do_null_update */B_FALSE);
}
g_topology_unlock();
PICKUP_GIANT();
if (cp == NULL) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
error);
return (error);
}
skip_open:
pp = cp->provider;
/*
* Determine the actual size of the device.
*/
*max_psize = *psize = pp->mediasize;
/*
* Determine the device's minimum transfer size and preferred
* transfer size.
*/
*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
*physical_ashift = 0;
if (pp->stripesize && pp->stripesize > (1 << *logical_ashift) &&
- ISP2(pp->stripesize) && pp->stripesize <= (1 << ASHIFT_MAX) &&
- pp->stripeoffset == 0)
+ ISP2(pp->stripesize) && pp->stripeoffset == 0)
*physical_ashift = highbit(pp->stripesize) - 1;
/*
* Clear the nowritecache settings, so that on a vdev_reopen()
* we will try again.
*/
vd->vdev_nowritecache = B_FALSE;
/* Inform the ZIO pipeline that we are non-rotational. */
error = g_getattr("GEOM::rotation_rate", cp, &rate);
if (error == 0 && rate == DISK_RR_NON_ROTATING)
vd->vdev_nonrot = B_TRUE;
else
vd->vdev_nonrot = B_FALSE;
/* Set when device reports it supports TRIM. */
error = g_getattr("GEOM::candelete", cp, &has_trim);
vd->vdev_has_trim = (error == 0 && has_trim);
/* Set when device reports it supports secure TRIM. */
/* unavailable on FreeBSD */
vd->vdev_has_securetrim = B_FALSE;
return (0);
}
static void
vdev_geom_close(vdev_t *vd)
{
struct g_consumer *cp;
boolean_t locked;
cp = vd->vdev_tsd;
DROP_GIANT();
locked = g_topology_locked();
if (!locked)
g_topology_lock();
if (!vd->vdev_reopening ||
(cp != NULL && ((cp->flags & G_CF_ORPHAN) != 0 ||
(cp->provider != NULL && cp->provider->error != 0))))
vdev_geom_close_locked(vd);
if (!locked)
g_topology_unlock();
PICKUP_GIANT();
}
static void
vdev_geom_io_intr(struct bio *bp)
{
vdev_t *vd;
zio_t *zio;
zio = bp->bio_caller1;
vd = zio->io_vd;
zio->io_error = bp->bio_error;
if (zio->io_error == 0 && bp->bio_resid != 0)
zio->io_error = SET_ERROR(EIO);
switch (zio->io_error) {
case ENOTSUP:
/*
* If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
* that future attempts will never succeed. In this case
* we set a persistent flag so that we don't bother with
* requests in the future.
*/
switch (bp->bio_cmd) {
case BIO_FLUSH:
vd->vdev_nowritecache = B_TRUE;
break;
case BIO_DELETE:
break;
}
break;
case ENXIO:
if (!vd->vdev_remove_wanted) {
/*
* If the provider's error is set, we assume it is being
* removed.
*/
if (bp->bio_to->error != 0) {
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa,
SPA_ASYNC_REMOVE);
} else if (!vd->vdev_delayed_close) {
vd->vdev_delayed_close = B_TRUE;
}
}
break;
}
/*
* We have to split bio freeing into two parts, because the ABD code
* cannot be called in this context and vdev_op_io_done is not called
* for ZIO_TYPE_IOCTL zio-s.
*/
if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
g_destroy_bio(bp);
zio->io_bio = NULL;
}
zio_delay_interrupt(zio);
}
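/*
 * The READ/WRITE half of that split freeing happens in
 * vdev_geom_io_done() below: it returns the borrowed ABD buffer (or
 * frees the bio_ma page array) and only then destroys the bio.
 */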
struct vdev_geom_check_unmapped_cb_state {
int pages;
uint_t end;
};
/*
* Callback to check the ABD segment size/alignment and count the pages.
* GEOM requires the data buffer to look virtually contiguous. This means
* that only the first page of the buffer may start off a page boundary and
* only the last page may end off one; all other physical pages must be full.
*/
static int
vdev_geom_check_unmapped_cb(void *buf, size_t len, void *priv)
{
struct vdev_geom_check_unmapped_cb_state *s = priv;
vm_offset_t off = (vm_offset_t)buf & PAGE_MASK;
if (s->pages != 0 && off != 0)
return (1);
if (s->end != 0)
return (1);
s->end = (off + len) & PAGE_MASK;
s->pages += (off + len + PAGE_MASK) >> PAGE_SHIFT;
return (0);
}
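/*
 * For example, assuming 4 KB pages: an ABD built from a page-aligned
 * 4 KB chunk followed by a page-aligned 2 KB chunk passes this check,
 * since only the final chunk ends short of a page boundary.  With the
 * 2 KB chunk first, the callback returns 1 on the second chunk
 * (s->end != 0) and the I/O falls back to the copying path.
 */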
/*
* Check whether we can use unmapped I/O for this ZIO on this device to
* avoid data copying between scattered and/or gang ABD buffer and linear.
*/
static int
vdev_geom_check_unmapped(zio_t *zio, struct g_consumer *cp)
{
struct vdev_geom_check_unmapped_cb_state s;
/* If unmapped I/O is administratively disabled, respect that. */
if (!unmapped_buf_allowed)
return (0);
/* If the buffer is already linear, then nothing to do here. */
if (abd_is_linear(zio->io_abd))
return (0);
/*
* If unmapped I/O is not supported by the GEOM provider,
* then we can't do anything and have to copy the data.
*/
if ((cp->provider->flags & G_PF_ACCEPT_UNMAPPED) == 0)
return (0);
/* Check the buffer chunks sizes/alignments and count pages. */
s.pages = s.end = 0;
if (abd_iterate_func(zio->io_abd, 0, zio->io_size,
vdev_geom_check_unmapped_cb, &s))
return (0);
return (s.pages);
}
/*
* Callback to translate the ABD segment into array of physical pages.
*/
static int
vdev_geom_fill_unmap_cb(void *buf, size_t len, void *priv)
{
struct bio *bp = priv;
vm_offset_t addr = (vm_offset_t)buf;
vm_offset_t end = addr + len;
if (bp->bio_ma_n == 0) {
bp->bio_ma_offset = addr & PAGE_MASK;
addr &= ~PAGE_MASK;
} else {
ASSERT0(P2PHASE(addr, PAGE_SIZE));
}
do {
bp->bio_ma[bp->bio_ma_n++] =
PHYS_TO_VM_PAGE(pmap_kextract(addr));
addr += PAGE_SIZE;
} while (addr < end);
return (0);
}
static void
vdev_geom_io_start(zio_t *zio)
{
vdev_t *vd;
struct g_consumer *cp;
struct bio *bp;
vd = zio->io_vd;
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
} else {
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush ||
vdev_geom_bio_flush_disable)
break;
if (vd->vdev_nowritecache) {
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
goto sendreq;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
}
zio_execute(zio);
return;
case ZIO_TYPE_TRIM:
if (!vdev_geom_bio_delete_disable) {
goto sendreq;
}
zio_execute(zio);
return;
default:
;
/* PASSTHROUGH --- placate compiler */
}
sendreq:
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM ||
zio->io_type == ZIO_TYPE_IOCTL);
cp = vd->vdev_tsd;
if (cp == NULL) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
bp = g_alloc_bio();
bp->bio_caller1 = zio;
switch (zio->io_type) {
case ZIO_TYPE_READ:
case ZIO_TYPE_WRITE:
zio->io_target_timestamp = zio_handle_io_delay(zio);
bp->bio_offset = zio->io_offset;
bp->bio_length = zio->io_size;
if (zio->io_type == ZIO_TYPE_READ)
bp->bio_cmd = BIO_READ;
else
bp->bio_cmd = BIO_WRITE;
/*
* If possible, represent the scattered and/or gang ABD buffer to
* GEOM as an array of physical pages. This satisfies GEOM's
* requirement for a virtually contiguous buffer without copying.
*/
int pgs = vdev_geom_check_unmapped(zio, cp);
if (pgs > 0) {
bp->bio_ma = malloc(sizeof (struct vm_page *) * pgs,
M_DEVBUF, M_WAITOK);
bp->bio_ma_n = 0;
bp->bio_ma_offset = 0;
abd_iterate_func(zio->io_abd, 0, zio->io_size,
vdev_geom_fill_unmap_cb, bp);
bp->bio_data = unmapped_buf;
bp->bio_flags |= BIO_UNMAPPED;
} else {
if (zio->io_type == ZIO_TYPE_READ) {
bp->bio_data = abd_borrow_buf(zio->io_abd,
zio->io_size);
} else {
bp->bio_data = abd_borrow_buf_copy(zio->io_abd,
zio->io_size);
}
}
break;
case ZIO_TYPE_TRIM:
bp->bio_cmd = BIO_DELETE;
bp->bio_data = NULL;
bp->bio_offset = zio->io_offset;
bp->bio_length = zio->io_size;
break;
case ZIO_TYPE_IOCTL:
bp->bio_cmd = BIO_FLUSH;
bp->bio_data = NULL;
bp->bio_offset = cp->provider->mediasize;
bp->bio_length = 0;
break;
default:
panic("invalid zio->io_type: %d\n", zio->io_type);
}
bp->bio_done = vdev_geom_io_intr;
zio->io_bio = bp;
g_io_request(bp, cp);
}
static void
vdev_geom_io_done(zio_t *zio)
{
struct bio *bp = zio->io_bio;
if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
ASSERT3P(bp, ==, NULL);
return;
}
if (bp == NULL) {
ASSERT3S(zio->io_error, ==, ENXIO);
return;
}
if (bp->bio_ma != NULL) {
free(bp->bio_ma, M_DEVBUF);
} else {
if (zio->io_type == ZIO_TYPE_READ) {
abd_return_buf_copy(zio->io_abd, bp->bio_data,
zio->io_size);
} else {
abd_return_buf(zio->io_abd, bp->bio_data,
zio->io_size);
}
}
g_destroy_bio(bp);
zio->io_bio = NULL;
}
static void
vdev_geom_hold(vdev_t *vd)
{
}
static void
vdev_geom_rele(vdev_t *vd)
{
}
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_geom_open,
.vdev_op_close = vdev_geom_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_geom_io_start,
.vdev_op_io_done = vdev_geom_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_geom_hold,
.vdev_op_rele = vdev_geom_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index 82f0ef335d38..42bb7551e9c7 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -1,1372 +1,1374 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
*/
/*
* ZFS control directory (a.k.a. ".zfs")
*
* This directory provides a common location for all ZFS meta-objects.
* Currently, this is only the 'snapshot' directory, but this may expand in the
* future. The elements are built using the GFS primitives, as the hierarchy
* does not actually exist on disk.
*
* For 'snapshot', we don't want to have all snapshots always mounted, because
* this would take up a huge amount of space in /etc/mnttab. We have three
* types of objects:
*
* ctldir ------> snapshotdir -------> snapshot
* |
* |
* V
* mounted fs
*
* The 'snapshot' node contains just enough information to lookup '..' and act
* as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
* perform an automount of the underlying filesystem and return the
* corresponding vnode.
*
* All mounts are handled automatically by the kernel, but unmounts are
* (currently) handled from user land. The main reason is that there is no
* reliable way to auto-unmount the filesystem when it's "no longer in use".
* When the user unmounts a filesystem, we call zfsctl_unmount(), which
* unmounts any snapshots within the snapshot directory.
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
* share the same vfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
* (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
* However, vnodes within these mounted-on file systems have their v_vfsp
* fields set to the head filesystem to make NFS happy (see
* zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
* so that it cannot be freed until all snapshots have been unmounted.
*/
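/*
 * A typical automount sequence, sketched with a hypothetical dataset
 * mounted on /tank/fs:
 *
 *	ls /tank/fs/.zfs/snapshot/mysnap
 *	  -> zfsctl_snapdir_lookup("mysnap")
 *	  -> sfs_vgetx() creates or finds the snapshot pseudo vnode
 *	  -> mount_snapshot() mounts tank/fs@mysnap on
 *	     /tank/fs/.zfs/snapshot/mysnap
 */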
#include <sys/types.h>
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/dirent.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/zap.h>
#include <sys/sysproto.h>
#include "zfs_namecheck.h"
#include <sys/kernel.h>
#include <sys/ccompat.h>
/* Common access mode for all virtual directories under the ctldir */
const uint16_t zfsctl_ctldir_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
S_IROTH | S_IXOTH;
/*
* "Synthetic" filesystem implementation.
*/
/*
* Assert that A implies B.
*/
#define KASSERT_IMPLY(A, B, msg) KASSERT(!(A) || (B), (msg));
static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");
typedef struct sfs_node {
char sn_name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t sn_parent_id;
uint64_t sn_id;
} sfs_node_t;
/*
* Check the parent's ID as well as the node's to account for a chance
* that IDs originating from different domains (snapshot IDs, artificial
* IDs, znode IDs) may clash.
*/
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
sfs_node_t *n1 = vp->v_data;
sfs_node_t *n2 = arg;
bool equal;
equal = n1->sn_id == n2->sn_id &&
n1->sn_parent_id == n2->sn_parent_id;
/* Zero means equality. */
return (!equal);
}
static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
uint64_t id, struct vnode **vpp)
{
sfs_node_t search;
int err;
search.sn_id = id;
search.sn_parent_id = parent_id;
err = vfs_hash_get(mp, (uint32_t)id, flags, curthread, vpp,
sfs_compare_ids, &search);
return (err);
}
static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
uint64_t id, struct vnode **vpp)
{
int err;
KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
err = vfs_hash_insert(vp, (uint32_t)id, flags, curthread, vpp,
sfs_compare_ids, vp->v_data);
return (err);
}
static void
sfs_vnode_remove(struct vnode *vp)
{
vfs_hash_remove(vp);
}
typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);
static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
const char *tag, struct vop_vector *vops,
sfs_vnode_setup_fn setup, void *arg,
struct vnode **vpp)
{
struct vnode *vp;
int error;
error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
if (error != 0 || *vpp != NULL) {
KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
"sfs vnode with no data");
return (error);
}
/* Allocate a new vnode/inode. */
error = getnewvnode(tag, mp, vops, &vp);
if (error != 0) {
*vpp = NULL;
return (error);
}
/*
* Exclusively lock the vnode while it's being constructed.
*/
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
*vpp = NULL;
return (error);
}
setup(vp, arg);
error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
if (error != 0 || *vpp != NULL) {
KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
"sfs vnode with no data");
return (error);
}
*vpp = vp;
return (0);
}
static void
sfs_print_node(sfs_node_t *node)
{
printf("\tname = %s\n", node->sn_name);
printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}
static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
struct sfs_node *node;
KASSERT(strlen(name) < sizeof (node->sn_name),
("sfs node name is too long"));
KASSERT(size >= sizeof (*node), ("sfs node size is too small"));
node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);
strlcpy(node->sn_name, name, sizeof (node->sn_name));
node->sn_parent_id = parent_id;
node->sn_id = id;
return (node);
}
static void
sfs_destroy_node(sfs_node_t *node)
{
free(node, M_SFSNODES);
}
static void *
sfs_reclaim_vnode(vnode_t *vp)
{
void *data;
sfs_vnode_remove(vp);
data = vp->v_data;
vp->v_data = NULL;
return (data);
}
static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
zfs_uio_t *uio, off_t *offp)
{
struct dirent entry;
int error;
/* Reset ncookies for subsequent use of vfs_read_dirent. */
if (ap->a_ncookies != NULL)
*ap->a_ncookies = 0;
if (zfs_uio_resid(uio) < sizeof (entry))
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) < 0)
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) == 0) {
entry.d_fileno = id;
entry.d_type = DT_DIR;
entry.d_name[0] = '.';
entry.d_name[1] = '\0';
entry.d_namlen = 1;
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0)
return (SET_ERROR(error));
}
if (zfs_uio_offset(uio) < sizeof (entry))
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) == sizeof (entry)) {
entry.d_fileno = parent_id;
entry.d_type = DT_DIR;
entry.d_name[0] = '.';
entry.d_name[1] = '.';
entry.d_name[2] = '\0';
entry.d_namlen = 2;
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0)
return (SET_ERROR(error));
}
if (offp != NULL)
*offp = 2 * sizeof (entry);
return (0);
}
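/*
 * Callers rely on the offsets established above: "." at offset 0, ".."
 * at sizeof (struct dirent), and real entries starting at
 * *offp == 2 * sizeof (struct dirent).  zfsctl_root_readdir() and
 * zfsctl_snapdir_readdir() below use that dots_offset to locate their
 * own entries.
 */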
/*
* .zfs inode namespace
*
* We need to generate unique inode numbers for all files and directories
* within the .zfs pseudo-filesystem. We use the following scheme:
*
* ENTRY ZFSCTL_INODE
* .zfs 1
* .zfs/snapshot 2
* .zfs/snapshot/<snap> objectid(snap)
*/
#define ZFSCTL_INO_SNAP(id) (id)
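/*
 * For example, with the scheme above a snapshot whose objset id is 137
 * (a hypothetical value) shows up under .zfs/snapshot with inode number
 * ZFSCTL_INO_SNAP(137) == 137, while '.zfs' itself is always inode 1
 * and '.zfs/snapshot' is inode 2.
 */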
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
void
zfsctl_init(void)
{
}
void
zfsctl_fini(void)
{
}
boolean_t
zfsctl_is_node(vnode_t *vp)
{
return (vn_matchops(vp, zfsctl_ops_root) ||
vn_matchops(vp, zfsctl_ops_snapdir) ||
vn_matchops(vp, zfsctl_ops_snapshot));
}
typedef struct zfsctl_root {
sfs_node_t node;
sfs_node_t *snapdir;
timestruc_t cmtime;
} zfsctl_root_t;
/*
* Create the '.zfs' directory.
*/
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
zfsctl_root_t *dot_zfs;
sfs_node_t *snapdir;
vnode_t *rvp;
uint64_t crtime[2];
ASSERT3P(zfsvfs->z_ctldir, ==, NULL);
snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
ZFSCTL_INO_SNAPDIR);
dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof (*dot_zfs), ".zfs", 0,
ZFSCTL_INO_ROOT);
dot_zfs->snapdir = snapdir;
VERIFY0(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp));
VERIFY0(sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
&crtime, sizeof (crtime)));
ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
vput(rvp);
zfsvfs->z_ctldir = dot_zfs;
}
/*
* Destroy the '.zfs' directory. Only called when the filesystem is unmounted.
* The nodes must not have any associated vnodes by now as they should be
* vflush-ed.
*/
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
return (VFS_ROOT(mp, flags, vpp));
}
static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{
ASSERT_VOP_ELOCKED(vp, __func__);
/* We support shared locking. */
VN_LOCK_ASHARE(vp);
vp->v_type = VDIR;
vp->v_data = arg;
}
static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
void *node;
int err;
node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
zfsctl_common_vnode_setup, node, vpp);
return (err);
}
static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
void *node;
int err;
node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
&zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
return (err);
}
/*
* Given a root znode, retrieve the associated .zfs directory.
* Add a hold to the vnode and return it.
*/
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
int error;
error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
return (error);
}
/*
* Common open routine. Disallow any write access.
*/
static int
zfsctl_common_open(struct vop_open_args *ap)
{
int flags = ap->a_mode;
if (flags & FWRITE)
return (SET_ERROR(EACCES));
return (0);
}
/*
* Common close routine. Nothing to do here.
*/
static int
zfsctl_common_close(struct vop_close_args *ap)
{
(void) ap;
return (0);
}
/*
* Common access routine. Disallow writes.
*/
static int
zfsctl_common_access(struct vop_access_args *ap)
{
accmode_t accmode = ap->a_accmode;
if (accmode & VWRITE)
return (SET_ERROR(EACCES));
return (0);
}
/*
* Common getattr function. Fill in basic information.
*/
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
timestruc_t now;
sfs_node_t *node;
node = vp->v_data;
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_rdev = 0;
/*
* We are a purely virtual object, so we have no
* blocksize or allocated blocks.
*/
vap->va_blksize = 0;
vap->va_nblocks = 0;
vap->va_gen = 0;
vn_fsid(vp, vap);
vap->va_mode = zfsctl_ctldir_mode;
vap->va_type = VDIR;
/*
* We live in the now (for atime).
*/
gethrestime(&now);
vap->va_atime = now;
/* FreeBSD: Reset chflags(2) flags. */
vap->va_flags = 0;
vap->va_nodeid = node->sn_id;
/* At least '.' and '..'. */
vap->va_nlink = 2;
}
#ifndef _OPENSOLARIS_SYS_VNODE_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfsctl_common_fid(struct vop_fid_args *ap)
{
vnode_t *vp = ap->a_vp;
fid_t *fidp = (void *)ap->a_fid;
sfs_node_t *node = vp->v_data;
uint64_t object = node->sn_id;
zfid_short_t *zfid;
int i;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = SHORT_FID_LEN;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* .zfs nodes always have a generation number of 0 */
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfsctl_common_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
(void) sfs_reclaim_vnode(vp);
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_print_args {
struct vnode *a_vp;
};
#endif
static int
zfsctl_common_print(struct vop_print_args *ap)
{
sfs_print_node(ap->a_vp->v_data);
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
/*
* Get root directory attributes.
*/
static int
zfsctl_root_getattr(struct vop_getattr_args *ap)
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
zfsctl_root_t *node = vp->v_data;
zfsctl_common_getattr(vp, vap);
vap->va_ctime = node->cmtime;
vap->va_mtime = vap->va_ctime;
vap->va_birthtime = vap->va_ctime;
vap->va_nlink += 1; /* snapdir */
vap->va_size = vap->va_nlink;
return (0);
}
/*
* When we lookup "." we still can be asked to lock it
* differently, can't we?
*/
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
vref(dvp);
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/* Relocking for the "." case may have left us with a reclaimed vnode. */
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
}
/*
* Special case the handling of "..".
*/
static int
zfsctl_root_lookup(struct vop_lookup_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *dvp = ap->a_dvp;
vnode_t **vpp = ap->a_vpp;
int flags = ap->a_cnp->cn_flags;
int lkflags = ap->a_cnp->cn_lkflags;
int nameiop = ap->a_cnp->cn_nameiop;
int err;
ASSERT3S(dvp->v_type, ==, VDIR);
if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
if (err == 0)
*vpp = dvp;
} else if ((flags & ISDOTDOT) != 0) {
err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
lkflags, vpp);
} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
} else {
err = SET_ERROR(ENOENT);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
zfsctl_root_readdir(struct vop_readdir_args *ap)
{
struct dirent entry;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfsctl_root_t *node = vp->v_data;
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset);
if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0;
return (error);
}
if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL));
_Static_assert(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name),
"node->snapdir->sn_name too big for entry.d_name");
entry.d_fileno = node->snapdir->sn_id;
entry.d_type = DT_DIR;
strcpy(entry.d_name, node->snapdir->sn_name);
entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
if (error == ENAMETOOLONG)
error = 0;
return (SET_ERROR(error));
}
if (eofp != NULL)
*eofp = 1;
return (0);
}
static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
static const char dotzfs_name[4] = ".zfs";
vnode_t *dvp;
int error;
if (*ap->a_buflen < sizeof (dotzfs_name))
return (SET_ERROR(ENOMEM));
error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
LK_SHARED, &dvp);
if (error != 0)
return (SET_ERROR(error));
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= sizeof (dotzfs_name);
memcpy(ap->a_buf + *ap->a_buflen, dotzfs_name, sizeof (dotzfs_name));
return (0);
}
static int
zfsctl_common_pathconf(struct vop_pathconf_args *ap)
{
/*
* We care about ACL variables so that user land utilities like ls
* can display them correctly. Since the ctldir's st_dev is set to be
* the same as the parent dataset, we must support all variables that
* it supports.
*/
switch (ap->a_name) {
case _PC_LINK_MAX:
*ap->a_retval = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*ap->a_retval = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*ap->a_retval = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
*ap->a_retval = 0;
return (0);
case _PC_ACL_NFS4:
*ap->a_retval = 1;
return (0);
case _PC_ACL_PATH_MAX:
*ap->a_retval = ACL_MAX_ENTRIES;
return (0);
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
default:
return (vop_stdpathconf(ap));
}
}
/*
* Returns a trivial ACL
*/
static int
zfsctl_common_getacl(struct vop_getacl_args *ap)
{
int i;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
acl_nfs4_sync_acl_from_mode(ap->a_aclp, zfsctl_ctldir_mode, 0);
/*
* acl_nfs4_sync_acl_from_mode assumes that the owner can always modify
* attributes. That is not the case for the ctldir, so we must clear
* those bits. We also must clear ACL_READ_NAMED_ATTRS, because xattrs
* aren't supported by the ctldir.
*/
for (i = 0; i < ap->a_aclp->acl_cnt; i++) {
struct acl_entry *entry;
entry = &(ap->a_aclp->acl_entry[i]);
entry->ae_perm &= ~(ACL_WRITE_ACL | ACL_WRITE_OWNER |
ACL_WRITE_ATTRIBUTES | ACL_WRITE_NAMED_ATTRS |
ACL_READ_NAMED_ATTRS);
}
return (0);
}
static struct vop_vector zfsctl_ops_root = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_ioctl = VOP_EINVAL,
.vop_getattr = zfsctl_root_getattr,
.vop_access = zfsctl_common_access,
.vop_readdir = zfsctl_root_readdir,
.vop_lookup = zfsctl_root_lookup,
.vop_inactive = VOP_NULL,
.vop_reclaim = zfsctl_common_reclaim,
.vop_fid = zfsctl_common_fid,
.vop_print = zfsctl_common_print,
.vop_vptocnp = zfsctl_root_vptocnp,
.vop_pathconf = zfsctl_common_pathconf,
.vop_getacl = zfsctl_common_getacl,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_root);
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
dmu_objset_name(os, zname);
if (strlen(zname) + 1 + strlen(name) >= len)
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(zname, "@");
(void) strcat(zname, name);
return (0);
}
static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
int err;
err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
return (err);
}
/*
* Given a vnode, get the root vnode of a filesystem mounted on top of
* the vnode, if any. The root vnode is referenced and locked.
* If no filesystem is mounted, then the original vnode remains referenced
* and locked. If any error happens, the original vnode is unlocked and
* released.
*/
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
struct mount *mp;
int err;
ASSERT_VOP_LOCKED(*vpp, __func__);
ASSERT3S((*vpp)->v_type, ==, VDIR);
if ((mp = (*vpp)->v_mountedhere) != NULL) {
err = vfs_busy(mp, 0);
KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
vput(*vpp);
err = VFS_ROOT(mp, flags, vpp);
vfs_unbusy(mp);
return (err);
}
return (EJUSTRETURN);
}
typedef struct {
const char *snap_name;
uint64_t snap_id;
} snapshot_setup_arg_t;
static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
snapshot_setup_arg_t *ssa = arg;
sfs_node_t *node;
ASSERT_VOP_ELOCKED(vp, __func__);
node = sfs_alloc_node(sizeof (sfs_node_t),
ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
zfsctl_common_vnode_setup(vp, node);
/* We have to support recursive locking. */
VN_LOCK_AREC(vp);
}
/*
* Lookup entry point for the 'snapshot' directory. Try to open the
* snapshot if it exists, creating the pseudo filesystem vnode as necessary.
* Perform a mount of the associated dataset on top of the vnode.
* There are four possibilities:
* - the snapshot node and vnode do not exist
* - the snapshot vnode is covered by the mounted snapshot
* - the snapshot vnode is not covered yet, the mount operation is in progress
* - the snapshot vnode is not covered, because the snapshot has been unmounted
* The last two states are transient and should be relatively short-lived.
*/
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
vnode_t *dvp = ap->a_dvp;
vnode_t **vpp = ap->a_vpp;
struct componentname *cnp = ap->a_cnp;
char name[NAME_MAX + 1];
char fullname[ZFS_MAX_DATASET_NAME_LEN];
char *mountpoint;
size_t mountpoint_len;
zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
uint64_t snap_id;
int nameiop = cnp->cn_nameiop;
int lkflags = cnp->cn_lkflags;
int flags = cnp->cn_flags;
int err;
ASSERT3S(dvp->v_type, ==, VDIR);
if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
if (err == 0)
*vpp = dvp;
return (err);
}
if (flags & ISDOTDOT) {
err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
vpp);
return (err);
}
if (cnp->cn_namelen >= sizeof (name))
return (SET_ERROR(ENAMETOOLONG));
strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
if (err != 0)
return (SET_ERROR(ENOENT));
for (;;) {
snapshot_setup_arg_t ssa;
ssa.snap_name = name;
ssa.snap_id = snap_id;
err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
snap_id, "zfs", &zfsctl_ops_snapshot,
zfsctl_snapshot_vnode_setup, &ssa, vpp);
if (err != 0)
return (err);
/* Check if a new vnode has just been created. */
if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
break;
/*
* Check if a snapshot is already mounted on top of the vnode.
*/
err = zfsctl_mounted_here(vpp, lkflags);
if (err != EJUSTRETURN)
return (err);
/*
* If the vnode is not covered, then either the mount operation
* is in progress or the snapshot has already been unmounted
* but the vnode hasn't been inactivated and reclaimed yet.
* We can try to re-use the vnode in the latter case.
*/
VI_LOCK(*vpp);
if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
VI_UNLOCK(*vpp);
/*
* Upgrade to exclusive lock in order to:
* - avoid race conditions
* - satisfy the contract of mount_snapshot()
*/
err = VOP_LOCK(*vpp, LK_TRYUPGRADE);
if (err == 0)
break;
} else {
VI_UNLOCK(*vpp);
}
/*
* In this state we can loop on uncontested locks and starve
* the thread doing the lengthy, non-trivial mount operation.
* So, yield to prevent that from happening.
*/
vput(*vpp);
kern_yield(PRI_USER);
}
VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof (fullname), fullname));
mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
(void) snprintf(mountpoint, mountpoint_len,
"%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
dvp->v_vfsp->mnt_stat.f_mntonname, name);
err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
kmem_free(mountpoint, mountpoint_len);
if (err == 0) {
/*
* Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
*
* This is where we lie about our v_vfsp in order to
* make .zfs/snapshot/<snapname> accessible over NFS
* without requiring manual mounts of <snapname>.
*/
ASSERT3P(VTOZ(*vpp)->z_zfsvfs, !=, zfsvfs);
VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
/* Clear the root flag (set via VFS_ROOT) as well. */
(*vpp)->v_vflag &= ~VV_ROOT;
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
struct dirent entry;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&uio, &dots_offset);
if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0;
return (error);
}
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
for (;;) {
uint64_t cookie;
uint64_t id;
cookie = zfs_uio_offset(&uio) - dots_offset;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
snapname, &id, &cookie, NULL);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT) {
if (eofp != NULL)
*eofp = 1;
error = 0;
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
entry.d_fileno = id;
entry.d_type = DT_DIR;
strcpy(entry.d_name, snapname);
entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
if (error == ENAMETOOLONG)
error = 0;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(error));
}
zfs_uio_setoffset(&uio, cookie + dots_offset);
}
__builtin_unreachable();
}
static int
zfsctl_snapdir_getattr(struct vop_getattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
dsl_dataset_t *ds;
uint64_t snap_count;
int err;
- ZFS_ENTER(zfsvfs);
+ if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (err);
ds = dmu_objset_ds(zfsvfs->z_os);
zfsctl_common_getattr(vp, vap);
vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
vap->va_mtime = vap->va_ctime;
vap->va_birthtime = vap->va_ctime;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (err != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
vap->va_nlink += snap_count;
}
vap->va_size = vap->va_nlink;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
static struct vop_vector zfsctl_ops_snapdir = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_getattr = zfsctl_snapdir_getattr,
.vop_access = zfsctl_common_access,
.vop_readdir = zfsctl_snapdir_readdir,
.vop_lookup = zfsctl_snapdir_lookup,
.vop_reclaim = zfsctl_common_reclaim,
.vop_fid = zfsctl_common_fid,
.vop_print = zfsctl_common_print,
.vop_pathconf = zfsctl_common_pathconf,
.vop_getacl = zfsctl_common_getacl,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapdir);
static int
zfsctl_snapshot_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
VERIFY3S(vrecycle(vp), ==, 1);
return (0);
}
static int
zfsctl_snapshot_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
void *data = vp->v_data;
sfs_reclaim_vnode(vp);
sfs_destroy_node(data);
return (0);
}
static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
struct mount *mp;
vnode_t *dvp;
vnode_t *vp;
sfs_node_t *node;
size_t len;
int locked;
int error;
vp = ap->a_vp;
node = vp->v_data;
len = strlen(node->sn_name);
if (*ap->a_buflen < len)
return (SET_ERROR(ENOMEM));
/*
* Prevent unmounting of the snapshot while the vnode lock
* is not held. That is not strictly required, but allows
* us to assert that an uncovered snapshot vnode is never
* "leaked".
*/
mp = vp->v_mountedhere;
if (mp == NULL)
return (SET_ERROR(ENOENT));
error = vfs_busy(mp, 0);
KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));
/*
* We can vput the vnode as we can now depend on the reference owned
* by the busied mp. But we also need to hold the vnode, because
* the reference may go away after vfs_unbusy(), which has to be called
* before we can lock the vnode again.
*/
locked = VOP_ISLOCKED(vp);
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(vp);
#else
vhold(vp);
#endif
vput(vp);
/* Look up .zfs/snapshot, our parent. */
error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
if (error == 0) {
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= len;
memcpy(ap->a_buf + *ap->a_buflen, node->sn_name, len);
}
vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
vget_finish(vp, locked | LK_RETRY, vs);
#else
vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
#endif
return (error);
}
/*
* These VP's should never see the light of day. They should always
* be covered.
*/
static struct vop_vector zfsctl_ops_snapshot = {
.vop_default = NULL, /* ensure very restricted access */
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_inactive = zfsctl_snapshot_inactive,
#if __FreeBSD_version >= 1300045
.vop_need_inactive = vop_stdneed_inactive,
#endif
.vop_reclaim = zfsctl_snapshot_reclaim,
.vop_vptocnp = zfsctl_snapshot_vptocnp,
.vop_lock1 = vop_stdlock,
.vop_unlock = vop_stdunlock,
.vop_islocked = vop_stdislocked,
.vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */
.vop_print = zfsctl_common_print,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapshot);
int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
zfsvfs_t *zfsvfs __unused = vfsp->vfs_data;
vnode_t *vp;
int error;
ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
*zfsvfsp = NULL;
error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
ZFSCTL_INO_SNAPDIR, objsetid, &vp);
if (error == 0 && vp != NULL) {
/*
* XXX Probably need to at least reference, if not busy, the mp.
*/
if (vp->v_mountedhere != NULL)
*zfsvfsp = vp->v_mountedhere->mnt_data;
vput(vp);
}
if (*zfsvfsp == NULL)
return (SET_ERROR(EINVAL));
return (0);
}
/*
* Unmount any snapshots for the given filesystem. This is called from
* zfs_umount() - if we have a ctldir, then go through and unmount all the
* snapshots.
*/
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct mount *mp;
vnode_t *vp;
uint64_t cookie;
int error;
ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
cookie = 0;
for (;;) {
uint64_t id;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
snapname, &id, &cookie, NULL);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT)
error = 0;
break;
}
for (;;) {
error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
ZFSCTL_INO_SNAPDIR, id, &vp);
if (error != 0 || vp == NULL)
break;
mp = vp->v_mountedhere;
/*
* v_mountedhere being NULL means that the
* (uncovered) vnode is in a transient state
* (mounting or unmounting), so loop until it
* settles down.
*/
if (mp != NULL)
break;
vput(vp);
}
if (error != 0)
break;
if (vp == NULL)
continue; /* no mountpoint, nothing to do */
/*
* The mount-point vnode is kept locked to avoid spurious EBUSY
* from a concurrent umount.
* The vnode lock must have recursive locking enabled.
*/
vfs_ref(mp);
error = dounmount(mp, fflags, curthread);
KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
("extra references after unmount"));
vput(vp);
if (error != 0)
break;
}
KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
("force unmounting failed"));
return (error);
}
int
zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
{
vfs_t *vfsp = NULL;
zfsvfs_t *zfsvfs = NULL;
if (strchr(snapname, '@') == NULL)
return (0);
int err = getzfsvfs(snapname, &zfsvfs);
if (err != 0) {
ASSERT3P(zfsvfs, ==, NULL);
return (0);
}
vfsp = zfsvfs->z_vfs;
ASSERT(!dsl_pool_config_held(dmu_objset_pool(zfsvfs->z_os)));
vfs_ref(vfsp);
vfs_unbusy(vfsp);
return (dounmount(vfsp, MS_FORCE, curthread));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
index 6321f0b532ae..778e4151656d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
@@ -1,963 +1,962 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
-#include <sys/extdirent.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/sunddi.h>
#include <sys/random.h>
#include <sys/policy.h>
#include <sys/condvar.h>
#include <sys/callb.h>
#include <sys/smp.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/dmu.h>
#include <sys/atomic.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/ccompat.h>
/*
* zfs_match_find() is used by zfs_dirent_lookup() to perform zap lookups
* of names after deciding which is the appropriate lookup interface.
*/
static int
zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, const char *name,
matchtype_t mt, uint64_t *zoid)
{
int error;
if (zfsvfs->z_norm) {
/*
* In the non-mixed case we only expect there would ever
* be one match, but we need to use the normalizing lookup.
*/
error = zap_lookup_norm(zfsvfs->z_os, dzp->z_id, name, 8, 1,
zoid, mt, NULL, 0, NULL);
} else {
error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid);
}
*zoid = ZFS_DIRENT_OBJ(*zoid);
return (error);
}
/*
* Look up a directory entry under a locked vnode.
* dvp being locked guarantees that there are no concurrent
* modifications of the directory and, thus, if a node can be found in
* the directory, then it must not be unlinked.
*
* Input arguments:
* dzp - znode for directory
* name - name of entry to lock
* flag - ZNEW: if the entry already exists, fail with EEXIST.
* ZEXISTS: if the entry does not exist, fail with ENOENT.
* ZXATTR: we want dzp's xattr directory
*
* Output arguments:
* zpp - pointer to the znode for the entry (NULL if there isn't one)
*
* Return value: 0 on success or errno on failure.
*
* NOTE: Always checks for, and rejects, '.' and '..'.
*/
int
zfs_dirent_lookup(znode_t *dzp, const char *name, znode_t **zpp, int flag)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
znode_t *zp;
matchtype_t mt = 0;
uint64_t zoid;
int error = 0;
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(dzp), __func__);
*zpp = NULL;
/*
* Verify that we are not trying to lock '.', '..', or '.zfs'
*/
if (name[0] == '.' &&
(((name[1] == '\0') || (name[1] == '.' && name[2] == '\0')) ||
(zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0)))
return (SET_ERROR(EEXIST));
/*
* Case sensitivity and normalization preferences are set when
* the file system is created. These are stored in the
* zfsvfs->z_case and zfsvfs->z_norm fields. These choices
* affect how we perform zap lookups.
*
* When matching we may need to normalize & change case according to
* FS settings.
*
* Note that a normalized match is necessary for a case-insensitive
* filesystem when the lookup request is not exact, because normalization
* can fold case independently of normalizing code point sequences.
*
* See the table above zfs_dropname().
*/
if (zfsvfs->z_norm != 0) {
mt = MT_NORMALIZE;
/*
* Determine if the match needs to honor the case specified in
* lookup, and if so keep track of that so that during
* normalization we don't fold case.
*/
if (zfsvfs->z_case == ZFS_CASE_MIXED) {
mt |= MT_MATCH_CASE;
}
}
/*
* Only look in or update the DNLC if we are looking for the
* name on a file system that does not require normalization
* or case folding. We can also look there if we happen to be
* on a non-normalizing, mixed sensitivity file system IF we
* are looking for the exact name.
*
* NB: we do not need to worry about this flag for ZFS_CASE_SENSITIVE
* because in that case MT_EXACT and MT_FIRST should produce exactly
* the same result.
*/
if (dzp->z_unlinked && !(flag & ZXATTR))
return (ENOENT);
if (flag & ZXATTR) {
error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
sizeof (zoid));
if (error == 0)
error = (zoid == 0 ? ENOENT : 0);
} else {
error = zfs_match_find(zfsvfs, dzp, name, mt, &zoid);
}
if (error) {
if (error != ENOENT || (flag & ZEXISTS)) {
return (error);
}
} else {
if (flag & ZNEW) {
return (SET_ERROR(EEXIST));
}
error = zfs_zget(zfsvfs, zoid, &zp);
if (error)
return (error);
ASSERT(!zp->z_unlinked);
*zpp = zp;
}
return (0);
}
static int
zfs_dd_lookup(znode_t *dzp, znode_t **zpp)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
znode_t *zp;
uint64_t parent;
int error;
#ifdef ZFS_DEBUG
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(dzp), __func__);
#endif
if (dzp->z_unlinked)
return (ENOENT);
if ((error = sa_lookup(dzp->z_sa_hdl,
SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
return (error);
error = zfs_zget(zfsvfs, parent, &zp);
if (error == 0)
*zpp = zp;
return (error);
}
int
zfs_dirlook(znode_t *dzp, const char *name, znode_t **zpp)
{
zfsvfs_t *zfsvfs __unused = dzp->z_zfsvfs;
znode_t *zp = NULL;
int error = 0;
#ifdef ZFS_DEBUG
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(dzp), __func__);
#endif
if (dzp->z_unlinked)
return (SET_ERROR(ENOENT));
if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
*zpp = dzp;
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
error = zfs_dd_lookup(dzp, &zp);
if (error == 0)
*zpp = zp;
} else {
error = zfs_dirent_lookup(dzp, name, &zp, ZEXISTS);
if (error == 0) {
dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
*zpp = zp;
}
}
return (error);
}
/*
* unlinked Set (formerly known as the "delete queue") Error Handling
*
* When dealing with the unlinked set, we dmu_tx_hold_zap(), but we
* don't specify the name of the entry that we will be manipulating. We
* also fib and say that we won't be adding any new entries to the
* unlinked set, even though we might (this is to lower the minimum file
* size that can be deleted in a full filesystem). So on the small
* chance that the nlink list is using a fat zap (ie. has more than
* 2000 entries), we *may* not pre-read a block that's needed.
* Therefore it is remotely possible for some of the assertions
* regarding the unlinked set below to fail due to i/o error. On a
* nondebug system, this will result in the space being leaked.
*/
void
zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT(zp->z_unlinked);
ASSERT3U(zp->z_links, ==, 0);
VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
dataset_kstats_update_nunlinks_kstat(&zfsvfs->z_kstat, 1);
}
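/*
 * Entries added above are consumed in two places below: zfs_rmnode()
 * removes an entry once the final delete transaction succeeds, and
 * zfs_unlinked_drain() re-processes whatever was left behind by a crash
 * or forced unmount when the filesystem is mounted again.
 */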
/*
* Clean up any znodes that had no links when we either crashed or
* (force) umounted the file system.
*/
void
zfs_unlinked_drain(zfsvfs_t *zfsvfs)
{
zap_cursor_t zc;
zap_attribute_t zap;
dmu_object_info_t doi;
znode_t *zp;
dmu_tx_t *tx;
int error;
/*
* Iterate over the contents of the unlinked set.
*/
for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj);
zap_cursor_retrieve(&zc, &zap) == 0;
zap_cursor_advance(&zc)) {
/*
* See what kind of object we have in the list.
*/
error = dmu_object_info(zfsvfs->z_os,
zap.za_first_integer, &doi);
if (error != 0)
continue;
ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
(doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
/*
* We need to re-mark these list entries for deletion,
* so we pull them back into core and set zp->z_unlinked.
*/
error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);
/*
* We may pick up znodes that are already marked for deletion.
* This could happen during the purge of an extended attribute
* directory. All we need to do is skip over them, since they
* are already in the system marked z_unlinked.
*/
if (error != 0)
continue;
vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY);
/*
* Due to changes in zfs_rmnode we need to make sure the
* link count is set to zero here.
*/
if (zp->z_links != 0) {
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
vput(ZTOV(zp));
continue;
}
zp->z_links = 0;
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&zp->z_links, sizeof (zp->z_links), tx));
dmu_tx_commit(tx);
}
zp->z_unlinked = B_TRUE;
vput(ZTOV(zp));
}
zap_cursor_fini(&zc);
}
/*
* Delete the entire contents of a directory. Return a count
* of the number of entries that could not be deleted. If we encounter
* an error, return a count of at least one so that the directory stays
* in the unlinked set.
*
* NOTE: this function assumes that the directory is inactive,
* so there is no need to lock its entries before deletion.
* Also, it assumes the directory's contents are *only* regular
* files.
*/
static int
zfs_purgedir(znode_t *dzp)
{
zap_cursor_t zc;
zap_attribute_t zap;
znode_t *xzp;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
int skipped = 0;
int error;
for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
(error = zap_cursor_retrieve(&zc, &zap)) == 0;
zap_cursor_advance(&zc)) {
error = zfs_zget(zfsvfs,
ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
if (error) {
skipped += 1;
continue;
}
vn_lock(ZTOV(xzp), LK_EXCLUSIVE | LK_RETRY);
ASSERT((ZTOV(xzp)->v_type == VREG) ||
(ZTOV(xzp)->v_type == VLNK));
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/* Is this really needed? */
zfs_sa_upgrade_txholds(tx, xzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
vput(ZTOV(xzp));
skipped += 1;
continue;
}
error = zfs_link_destroy(dzp, zap.za_name, xzp, tx, 0, NULL);
if (error)
skipped += 1;
dmu_tx_commit(tx);
vput(ZTOV(xzp));
}
zap_cursor_fini(&zc);
if (error != ENOENT)
skipped += 1;
return (skipped);
}
extern taskq_t *zfsvfs_taskq;
void
zfs_rmnode(znode_t *zp)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
uint64_t acl_obj;
uint64_t xattr_obj;
uint64_t count;
int error;
ASSERT3U(zp->z_links, ==, 0);
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
/*
* If this is an attribute directory, purge its contents.
*/
if (ZTOV(zp) != NULL && ZTOV(zp)->v_type == VDIR &&
(zp->z_pflags & ZFS_XATTR)) {
if (zfs_purgedir(zp) != 0) {
/*
* Not enough space to delete some xattrs.
* Leave it in the unlinked set.
*/
zfs_znode_dmu_fini(zp);
zfs_znode_free(zp);
return;
}
} else {
/*
* Free up all the data in the file. We don't do this for
* XATTR directories because we need truncate and remove to be
* in the same tx, like in zfs_znode_delete(). Otherwise, if
* we crash here we'll end up with an inconsistent truncated
* zap object in the delete queue. Note a truncated file is
* harmless since it only contains user data.
*/
error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
if (error) {
/*
* Not enough space or we were interrupted by unmount.
* Leave the file in the unlinked set.
*/
zfs_znode_dmu_fini(zp);
zfs_znode_free(zp);
return;
}
}
/*
* If the file has extended attributes, we're going to unlink
* the xattr dir.
*/
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error)
xattr_obj = 0;
acl_obj = zfs_external_acl(zp);
/*
* Set up the final transaction.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
if (xattr_obj)
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
if (acl_obj)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
/*
* Not enough space to delete the file. Leave it in the
* unlinked set, leaking it until the fs is remounted (at
* which point we'll call zfs_unlinked_drain() to process it).
*/
dmu_tx_abort(tx);
zfs_znode_dmu_fini(zp);
zfs_znode_free(zp);
return;
}
/*
* FreeBSD's implementation of zfs_zget requires a vnode to back it.
* This means that we could end up calling into getnewvnode while
* calling zfs_rmnode as a result of a prior call to getnewvnode
* trying to clear vnodes out of the cache. If this repeats we can
* recurse enough that we overflow our stack. To avoid this, we
* avoid calling zfs_zget on the xattr znode and instead simply add
* it to the unlinked set and schedule a call to zfs_unlinked_drain.
*/
if (xattr_obj) {
/* Add extended attribute directory to the unlinked set. */
VERIFY3U(0, ==,
zap_add_int(os, zfsvfs->z_unlinkedobj, xattr_obj, tx));
}
mutex_enter(&os->os_dsl_dataset->ds_dir->dd_activity_lock);
/* Remove this znode from the unlinked set */
VERIFY3U(0, ==,
zap_remove_int(os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
if (zap_count(os, zfsvfs->z_unlinkedobj, &count) == 0 && count == 0) {
cv_broadcast(&os->os_dsl_dataset->ds_dir->dd_activity_cv);
}
mutex_exit(&os->os_dsl_dataset->ds_dir->dd_activity_lock);
dataset_kstats_update_nunlinked_kstat(&zfsvfs->z_kstat, 1);
zfs_znode_delete(zp, tx);
dmu_tx_commit(tx);
if (xattr_obj) {
/*
* We're using the FreeBSD taskqueue API here instead of
* the Solaris taskq API since the FreeBSD API allows for a
* task to be enqueued multiple times but executed once.
*/
taskqueue_enqueue(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task);
}
}
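The comment above leans on a property of FreeBSD's taskqueue(9): enqueueing a task that is already pending does not queue a second execution, and the handler's "pending" argument reports how many enqueues were coalesced into one run. A minimal, hypothetical sketch of that behaviour follows; the names example_tq, example_task and example_handler are illustrative and not part of this change.

/* Illustrative taskqueue(9) sketch; not part of this diff. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue *example_tq;
static struct task example_task;

static void
example_handler(void *context, int pending)
{
	/* "pending" counts how many enqueues were folded into this run. */
	printf("example task ran, %d enqueue(s) coalesced\n", pending);
}

static void
example_init(void)
{
	example_tq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PWAIT, "example taskq");
	TASK_INIT(&example_task, 0, example_handler, NULL);

	/* Both calls may be serviced by a single handler invocation. */
	taskqueue_enqueue(example_tq, &example_task);
	taskqueue_enqueue(example_tq, &example_task);
}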
static uint64_t
zfs_dirent(znode_t *zp, uint64_t mode)
{
uint64_t de = zp->z_id;
if (zp->z_zfsvfs->z_version >= ZPL_VERSION_DIRENT_TYPE)
de |= IFTODT(mode) << 60;
return (de);
}
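zfs_dirent() packs the entry's file type into the top four bits of the 64-bit ZAP value and leaves the object number in the low bits. A small userland illustration of that encoding follows; the 60-bit object mask is purely illustrative, since the on-disk ZFS_DIRENT_OBJ() macro defines its own width.

/* Illustrative only; not part of this diff. */
#include <sys/stat.h>
#include <stdint.h>
#include <stdio.h>

#ifndef IFTODT
#define	IFTODT(mode)	(((mode) & S_IFMT) >> 12)
#endif

int
main(void)
{
	uint64_t obj = 0x1234;		/* hypothetical object number */
	uint64_t de = obj | ((uint64_t)IFTODT(S_IFREG) << 60);

	/* The top 4 bits carry the DT_* type, the remainder the object. */
	printf("type=%ju obj=%#jx\n", (uintmax_t)(de >> 60),
	    (uintmax_t)(de & ((1ULL << 60) - 1)));
	return (0);
}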
/*
* Link zp into dzp. Can only fail if zp has been unlinked.
*/
int
zfs_link_create(znode_t *dzp, const char *name, znode_t *zp, dmu_tx_t *tx,
int flag)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
vnode_t *vp = ZTOV(zp);
uint64_t value;
int zp_is_dir = (vp->v_type == VDIR);
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
int count = 0;
int error;
if (zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
}
if (zp_is_dir) {
if (dzp->z_links >= ZFS_LINK_MAX)
return (SET_ERROR(EMLINK));
}
if (!(flag & ZRENAMING)) {
if (zp->z_unlinked) { /* no new links to unlinked zp */
ASSERT(!(flag & (ZNEW | ZEXISTS)));
return (SET_ERROR(ENOENT));
}
if (zp->z_links >= ZFS_LINK_MAX - zp_is_dir) {
return (SET_ERROR(EMLINK));
}
zp->z_links++;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&zp->z_links, sizeof (zp->z_links));
} else {
ASSERT(!zp->z_unlinked);
}
value = zfs_dirent(zp, zp->z_mode);
error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, name,
8, 1, &value, tx);
/*
* zap_add() could fail to add the entry if it exceeds the capacity of the
* leaf block and zap_leaf_split() failed to help.
* The caller of this routine is responsible for failing the transaction,
* which will roll back the SA updates done above.
*/
if (error != 0) {
if (!(flag & ZRENAMING) && !(flag & ZNEW))
zp->z_links--;
return (error);
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&dzp->z_id, sizeof (dzp->z_id));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (!(flag & ZNEW)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime);
}
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT0(error);
dzp->z_size++;
dzp->z_links += zp_is_dir;
count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&dzp->z_links, sizeof (dzp->z_links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
ASSERT0(error);
return (0);
}
/*
* The match type in the code for this function should conform to:
*
* ------------------------------------------------------------------------
* fs type | z_norm | lookup type | match type
* ---------|-------------|-------------|----------------------------------
* CS !norm | 0 | 0 | 0 (exact)
* CS norm | formX | 0 | MT_NORMALIZE
* CI !norm | upper | !ZCIEXACT | MT_NORMALIZE
* CI !norm | upper | ZCIEXACT | MT_NORMALIZE | MT_MATCH_CASE
* CI norm | upper|formX | !ZCIEXACT | MT_NORMALIZE
* CI norm | upper|formX | ZCIEXACT | MT_NORMALIZE | MT_MATCH_CASE
* CM !norm | upper | !ZCILOOK | MT_NORMALIZE | MT_MATCH_CASE
* CM !norm | upper | ZCILOOK | MT_NORMALIZE
* CM norm | upper|formX | !ZCILOOK | MT_NORMALIZE | MT_MATCH_CASE
* CM norm | upper|formX | ZCILOOK | MT_NORMALIZE
*
* Abbreviations:
* CS = Case Sensitive, CI = Case Insensitive, CM = Case Mixed
* upper = case folding set by fs type on creation (U8_TEXTPREP_TOUPPER)
* formX = unicode normalization form set on fs creation
*/
static int
zfs_dropname(znode_t *dzp, const char *name, znode_t *zp, dmu_tx_t *tx,
int flag)
{
int error;
if (zp->z_zfsvfs->z_norm) {
matchtype_t mt = MT_NORMALIZE;
if (zp->z_zfsvfs->z_case == ZFS_CASE_MIXED) {
mt |= MT_MATCH_CASE;
}
error = zap_remove_norm(zp->z_zfsvfs->z_os, dzp->z_id,
name, mt, tx);
} else {
error = zap_remove(zp->z_zfsvfs->z_os, dzp->z_id, name, tx);
}
return (error);
}
/*
* Unlink zp from dzp, and mark zp for deletion if this was the last link.
* Can fail if zp is a mount point (EBUSY) or a non-empty directory (ENOTEMPTY).
* If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
* If it's non-NULL, we use it to indicate whether the znode needs deletion,
* and it's the caller's job to do it.
*/
int
zfs_link_destroy(znode_t *dzp, const char *name, znode_t *zp, dmu_tx_t *tx,
int flag, boolean_t *unlinkedp)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
vnode_t *vp = ZTOV(zp);
int zp_is_dir = (vp->v_type == VDIR);
boolean_t unlinked = B_FALSE;
sa_bulk_attr_t bulk[5];
uint64_t mtime[2], ctime[2];
int count = 0;
int error;
if (zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
}
if (!(flag & ZRENAMING)) {
if (zp_is_dir && !zfs_dirempty(zp))
return (SET_ERROR(ENOTEMPTY));
/*
* If we get here, we are going to try to remove the object.
* First try removing the name from the directory; if that
* fails, return the error.
*/
error = zfs_dropname(dzp, name, zp, tx, flag);
if (error != 0) {
return (error);
}
if (zp->z_links <= zp_is_dir) {
zfs_panic_recover("zfs: link count on vnode %p is %u, "
"should be at least %u", zp->z_vnode,
(int)zp->z_links,
zp_is_dir + 1);
zp->z_links = zp_is_dir + 1;
}
if (--zp->z_links == zp_is_dir) {
zp->z_unlinked = B_TRUE;
zp->z_links = 0;
unlinked = B_TRUE;
} else {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, sizeof (zp->z_pflags));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime);
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &zp->z_links, sizeof (zp->z_links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
count = 0;
ASSERT0(error);
} else {
ASSERT(!zp->z_unlinked);
error = zfs_dropname(dzp, name, zp, tx, flag);
if (error != 0)
return (error);
}
dzp->z_size--; /* one dirent removed */
dzp->z_links -= zp_is_dir; /* ".." link from zp */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &dzp->z_links, sizeof (dzp->z_links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
ASSERT0(error);
if (unlinkedp != NULL)
*unlinkedp = unlinked;
else if (unlinked)
zfs_unlinked_add(zp, tx);
return (0);
}
/*
* Indicate whether the directory is empty.
*/
boolean_t
zfs_dirempty(znode_t *dzp)
{
return (dzp->z_size == 2);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
znode_t *xzp;
dmu_tx_t *tx;
int error;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t parent __maybe_unused;
*xvpp = NULL;
if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
&acl_ids)) != 0)
return (error);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, 0)) {
zfs_acl_ids_free(&acl_ids);
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
return (error);
}
zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
ASSERT0(sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), &parent,
sizeof (parent)));
ASSERT3U(parent, ==, zp->z_id);
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
sizeof (xzp->z_id), tx));
zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "", NULL,
acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
*xvpp = xzp;
return (0);
}
/*
* Return a znode for the extended attribute directory for zp.
* ** If the directory does not already exist, it is created **
*
* IN: zp - znode to obtain attribute directory from
* cr - credentials of caller
* flags - flags from the VOP_LOOKUP call
*
* OUT: xzpp - pointer to extended attribute znode
*
* RETURN: 0 on success
* error number on failure
*/
int
zfs_get_xattrdir(znode_t *zp, znode_t **xzpp, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
znode_t *xzp;
vattr_t va;
int error;
top:
error = zfs_dirent_lookup(zp, "", &xzp, ZXATTR);
if (error)
return (error);
if (xzp != NULL) {
*xzpp = xzp;
return (0);
}
if (!(flags & CREATE_XATTR_DIR))
return (SET_ERROR(ENOATTR));
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
return (SET_ERROR(EROFS));
}
/*
* The ability to 'create' files in an attribute
* directory comes from the write_xattr permission on the base file.
*
* The ability to 'search' an attribute directory requires
* read_xattr permission on the base file.
*
* Once in a directory the ability to read/write attributes
* is controlled by the permissions on the attribute file.
*/
va.va_mask = AT_MODE | AT_UID | AT_GID;
va.va_type = VDIR;
va.va_mode = S_IFDIR | S_ISVTX | 0777;
zfs_fuid_map_ids(zp, cr, &va.va_uid, &va.va_gid);
error = zfs_make_xattrdir(zp, &va, xzpp, cr);
if (error == ERESTART) {
/* NB: we already did dmu_tx_wait() if necessary */
goto top;
}
if (error == 0)
VOP_UNLOCK1(ZTOV(*xzpp));
return (error);
}
/*
* Decide whether it is okay to remove within a sticky directory.
*
* In sticky directories, write access is not sufficient;
* you can remove entries from a directory only if:
*
* you own the directory,
* you own the entry,
* the entry is a plain file and you have write access,
* or you are privileged (checked in secpolicy...).
*
* The function returns 0 if remove access is granted.
*/
int
zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr)
{
uid_t uid;
uid_t downer;
uid_t fowner;
zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
if (zdp->z_zfsvfs->z_replay)
return (0);
if ((zdp->z_mode & S_ISVTX) == 0)
return (0);
downer = zfs_fuid_map_id(zfsvfs, zdp->z_uid, cr, ZFS_OWNER);
fowner = zfs_fuid_map_id(zfsvfs, zp->z_uid, cr, ZFS_OWNER);
if ((uid = crgetuid(cr)) == downer || uid == fowner ||
(ZTOV(zp)->v_type == VREG &&
zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr) == 0))
return (0);
else
return (secpolicy_vnode_remove(ZTOV(zp), cr));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
index a6930cec517b..60c9ff0581e0 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
@@ -1,303 +1,307 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_file.h>
#include <sys/buf.h>
#include <sys/stat.h>
int
zfs_file_open(const char *path, int flags, int mode, zfs_file_t **fpp)
{
struct thread *td;
int rc, fd;
td = curthread;
pwd_ensure_dirs();
/* 12.x doesn't take a const char * */
rc = kern_openat(td, AT_FDCWD, __DECONST(char *, path),
UIO_SYSSPACE, flags, mode);
if (rc)
return (SET_ERROR(rc));
fd = td->td_retval[0];
td->td_retval[0] = 0;
if (fget(curthread, fd, &cap_no_rights, fpp))
kern_close(td, fd);
return (0);
}
void
zfs_file_close(zfs_file_t *fp)
{
fo_close(fp, curthread);
}
static int
zfs_file_write_impl(zfs_file_t *fp, const void *buf, size_t count, loff_t *offp,
ssize_t *resid)
{
ssize_t rc;
struct uio auio;
struct thread *td;
struct iovec aiov;
td = curthread;
aiov.iov_base = (void *)(uintptr_t)buf;
aiov.iov_len = count;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_resid = count;
auio.uio_rw = UIO_WRITE;
auio.uio_td = td;
auio.uio_offset = *offp;
if ((fp->f_flag & FWRITE) == 0)
return (SET_ERROR(EBADF));
if (fp->f_type == DTYPE_VNODE)
bwillwrite();
rc = fo_write(fp, &auio, td->td_ucred, FOF_OFFSET, td);
if (rc)
return (SET_ERROR(rc));
if (resid)
*resid = auio.uio_resid;
else if (auio.uio_resid)
return (SET_ERROR(EIO));
*offp += count - auio.uio_resid;
return (rc);
}
int
zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
{
loff_t off = fp->f_offset;
ssize_t rc;
rc = zfs_file_write_impl(fp, buf, count, &off, resid);
if (rc == 0)
fp->f_offset = off;
return (SET_ERROR(rc));
}
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t count, loff_t off,
ssize_t *resid)
{
return (zfs_file_write_impl(fp, buf, count, &off, resid));
}
static int
zfs_file_read_impl(zfs_file_t *fp, void *buf, size_t count, loff_t *offp,
ssize_t *resid)
{
ssize_t rc;
struct uio auio;
struct thread *td;
struct iovec aiov;
td = curthread;
aiov.iov_base = (void *)(uintptr_t)buf;
aiov.iov_len = count;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_resid = count;
auio.uio_rw = UIO_READ;
auio.uio_td = td;
auio.uio_offset = *offp;
if ((fp->f_flag & FREAD) == 0)
return (SET_ERROR(EBADF));
rc = fo_read(fp, &auio, td->td_ucred, FOF_OFFSET, td);
if (rc)
return (SET_ERROR(rc));
if (resid)
*resid = auio.uio_resid;
*offp += count - auio.uio_resid;
return (SET_ERROR(0));
}
int
zfs_file_read(zfs_file_t *fp, void *buf, size_t count, ssize_t *resid)
{
loff_t off = fp->f_offset;
ssize_t rc;
rc = zfs_file_read_impl(fp, buf, count, &off, resid);
if (rc == 0)
fp->f_offset = off;
return (rc);
}
int
zfs_file_pread(zfs_file_t *fp, void *buf, size_t count, loff_t off,
ssize_t *resid)
{
return (zfs_file_read_impl(fp, buf, count, &off, resid));
}
int
zfs_file_seek(zfs_file_t *fp, loff_t *offp, int whence)
{
int rc;
struct thread *td;
td = curthread;
if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0)
return (SET_ERROR(ESPIPE));
rc = fo_seek(fp, *offp, whence, td);
if (rc == 0)
*offp = td->td_uretoff.tdu_off;
return (SET_ERROR(rc));
}
int
zfs_file_getattr(zfs_file_t *fp, zfs_file_attr_t *zfattr)
{
struct thread *td;
struct stat sb;
int rc;
td = curthread;
#if __FreeBSD_version < 1400037
rc = fo_stat(fp, &sb, td->td_ucred, td);
#else
rc = fo_stat(fp, &sb, td->td_ucred);
#endif
if (rc)
return (SET_ERROR(rc));
zfattr->zfa_size = sb.st_size;
zfattr->zfa_mode = sb.st_mode;
return (0);
}
static __inline int
zfs_vop_fsync(vnode_t *vp)
{
struct mount *mp;
int error;
+#if __FreeBSD_version < 1400068
+ if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
+#else
if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
+#endif
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK1(vp);
vn_finished_write(mp);
drop:
return (SET_ERROR(error));
}
int
zfs_file_fsync(zfs_file_t *fp, int flags)
{
if (fp->f_type != DTYPE_VNODE)
return (EINVAL);
return (zfs_vop_fsync(fp->f_vnode));
}
zfs_file_t *
zfs_file_get(int fd)
{
struct file *fp;
if (fget(curthread, fd, &cap_no_rights, &fp))
return (NULL);
return (fp);
}
void
zfs_file_put(zfs_file_t *fp)
{
fdrop(fp, curthread);
}
loff_t
zfs_file_off(zfs_file_t *fp)
{
return (fp->f_offset);
}
void *
zfs_file_private(zfs_file_t *fp)
{
file_t *tmpfp;
void *data;
int error;
tmpfp = curthread->td_fpop;
curthread->td_fpop = fp;
error = devfs_get_cdevpriv(&data);
curthread->td_fpop = tmpfp;
if (error != 0)
return (NULL);
return (data);
}
int
zfs_file_unlink(const char *fnamep)
{
zfs_uio_seg_t seg = UIO_SYSSPACE;
int rc;
#if __FreeBSD_version >= 1300018
rc = kern_funlinkat(curthread, AT_FDCWD, fnamep, FD_NONE, seg, 0, 0);
#elif __FreeBSD_version >= 1202504 || defined(AT_BENEATH)
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0, 0);
#else
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0);
#endif
return (SET_ERROR(rc));
}
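Taken together, these wrappers give the rest of ZFS a small file API on top of FreeBSD's fileops. A hypothetical in-kernel caller might use them as below; the path "/tmp/zfs_example" and the helper name are illustrative, the headers are those already included in this file, and the open flags are assumed to be the usual fcntl(2) values.

/* Illustrative only; not part of this diff. */
static int
example_write_marker(void)
{
	zfs_file_t *fp;
	zfs_file_attr_t attr;
	ssize_t resid;
	int error;

	error = zfs_file_open("/tmp/zfs_example", O_CREAT | O_WRONLY, 0600, &fp);
	if (error != 0)
		return (error);

	error = zfs_file_write(fp, "marker\n", 7, &resid);
	if (error == 0)
		error = zfs_file_getattr(fp, &attr); /* attr.zfa_size, attr.zfa_mode */

	zfs_file_close(fp);
	return (error);
}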
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 24e06b1a8809..b290c36748ca 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -1,2337 +1,2318 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/acl.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
-#include <sys/spa_boot.h>
#include <sys/jail.h>
#include <ufs/ufs/quota.h>
#include <sys/zfs_quota.h>
#include "zfs_comutil.h"
#ifndef MNTK_VMSETSIZE_BUG
#define MNTK_VMSETSIZE_BUG 0
#endif
#ifndef MNTK_NOMSYNC
#define MNTK_NOMSYNC 8
#endif
struct mtx zfs_debug_mtx;
MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");
int zfs_super_owner;
SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
"File system owners can perform privileged operation on file systems");
int zfs_debug_level;
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RWTUN, &zfs_debug_level, 0,
"Debug level");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");
static int zfs_version_acl = ZFS_ACL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, acl, CTLFLAG_RD, &zfs_version_acl, 0,
"ZFS_ACL_VERSION");
static int zfs_version_spa = SPA_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, spa, CTLFLAG_RD, &zfs_version_spa, 0,
"SPA_VERSION");
static int zfs_version_zpl = ZPL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, zpl, CTLFLAG_RD, &zfs_version_zpl, 0,
"ZPL_VERSION");
#if __FreeBSD_version >= 1400018
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg,
bool *mp_busy);
#else
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg);
#endif
static int zfs_mount(vfs_t *vfsp);
static int zfs_umount(vfs_t *vfsp, int fflag);
static int zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp);
static int zfs_statfs(vfs_t *vfsp, struct statfs *statp);
static int zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp);
static int zfs_sync(vfs_t *vfsp, int waitfor);
#if __FreeBSD_version >= 1300098
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors);
#else
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors);
#endif
static int zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp);
static void zfs_freevfs(vfs_t *vfsp);
struct vfsops zfs_vfsops = {
.vfs_mount = zfs_mount,
.vfs_unmount = zfs_umount,
#if __FreeBSD_version >= 1300049
.vfs_root = vfs_cache_root,
.vfs_cachedroot = zfs_root,
#else
.vfs_root = zfs_root,
#endif
.vfs_statfs = zfs_statfs,
.vfs_vget = zfs_vget,
.vfs_sync = zfs_sync,
.vfs_checkexp = zfs_checkexp,
.vfs_fhtovp = zfs_fhtovp,
.vfs_quotactl = zfs_quotactl,
};
VFS_SET(zfs_vfsops, zfs, VFCF_JAIL | VFCF_DELEGADMIN);
/*
* We need to keep a count of active fs's.
* This is necessary to prevent our module
* from being unloaded after a umount -f.
*/
static uint32_t zfs_active_fs_count = 0;
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
error = getzfsvfs_impl(os, &zfvp);
if (error != 0)
return (error);
if (zfvp == NULL)
return (ENOENT);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL))
tmp = 1;
break;
case ZFS_PROP_DEVICES:
if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL))
tmp = 1;
break;
case ZFS_PROP_EXEC:
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL))
tmp = 1;
break;
case ZFS_PROP_SETUID:
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL))
tmp = 1;
break;
case ZFS_PROP_READONLY:
if (vfs_optionisset(vfsp, MNTOPT_RW, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
tmp = 1;
break;
case ZFS_PROP_XATTR:
if (zfvp->z_flags & ZSB_XATTR)
tmp = zfvp->z_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
tmp = 1;
break;
default:
vfs_unbusy(vfsp);
return (ENOENT);
}
vfs_unbusy(vfsp);
if (tmp != *val) {
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
static int
zfs_getquota(zfsvfs_t *zfsvfs, uid_t id, int isgroup, struct dqblk64 *dqp)
{
int error = 0;
char buf[32];
uint64_t usedobj, quotaobj;
uint64_t quota, used = 0;
timespec_t now;
usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
if (quotaobj == 0 || zfsvfs->z_replay) {
error = ENOENT;
goto done;
}
(void) sprintf(buf, "%llx", (longlong_t)id);
if ((error = zap_lookup(zfsvfs->z_os, quotaobj,
buf, sizeof (quota), 1, &quota)) != 0) {
dprintf("%s(%d): quotaobj lookup failed\n",
__FUNCTION__, __LINE__);
goto done;
}
/*
* quota(8) uses bsoftlimit as "quota", and hardlimit as "limit".
* So we set them to be the same.
*/
dqp->dqb_bsoftlimit = dqp->dqb_bhardlimit = btodb(quota);
error = zap_lookup(zfsvfs->z_os, usedobj, buf, sizeof (used), 1, &used);
if (error && error != ENOENT) {
dprintf("%s(%d): usedobj failed; %d\n",
__FUNCTION__, __LINE__, error);
goto done;
}
dqp->dqb_curblocks = btodb(used);
dqp->dqb_ihardlimit = dqp->dqb_isoftlimit = 0;
vfs_timestamp(&now);
/*
* Setting this to 0 causes FreeBSD quota(8) to print
* the number of days since the epoch, which isn't
* particularly useful.
*/
dqp->dqb_btime = dqp->dqb_itime = now.tv_sec;
done:
return (error);
}
static int
#if __FreeBSD_version >= 1400018
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg, bool *mp_busy)
#else
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct thread *td;
int cmd, type, error = 0;
int bitsize;
zfs_userquota_prop_t quota_type;
struct dqblk64 dqblk = { 0 };
td = curthread;
cmd = cmds >> SUBCMDSHIFT;
type = cmds & SUBCMDMASK;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (id == -1) {
switch (type) {
case USRQUOTA:
id = td->td_ucred->cr_ruid;
break;
case GRPQUOTA:
id = td->td_ucred->cr_rgid;
break;
default:
error = EINVAL;
#if __FreeBSD_version < 1400018
if (cmd == Q_QUOTAON || cmd == Q_QUOTAOFF)
vfs_unbusy(vfsp);
#endif
goto done;
}
}
/*
* Map BSD type to:
* ZFS_PROP_USERUSED,
* ZFS_PROP_USERQUOTA,
* ZFS_PROP_GROUPUSED,
* ZFS_PROP_GROUPQUOTA
*/
switch (cmd) {
case Q_SETQUOTA:
case Q_SETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERQUOTA;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPQUOTA;
else
error = EINVAL;
break;
case Q_GETQUOTA:
case Q_GETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERUSED;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPUSED;
else
error = EINVAL;
break;
}
/*
* Depending on the cmd, we may need to get
* the ruid and domain (see fuidstr_to_sid?),
* the fuid (how?), or other information.
* Create fuid using zfs_fuid_create(zfsvfs, id,
* ZFS_OWNER or ZFS_GROUP, cr, &fuidp)?
* I think I can use just the id?
*
* Look at zfs_id_overquota() to look up a quota.
* zap_lookup(something, quotaobj, fuidstring,
* sizeof (long long), 1, &quota)
*
* See zfs_set_userquota() to set a quota.
*/
if ((uint32_t)type >= MAXQUOTAS) {
error = EINVAL;
goto done;
}
switch (cmd) {
case Q_GETQUOTASIZE:
bitsize = 64;
error = copyout(&bitsize, arg, sizeof (int));
break;
case Q_QUOTAON:
// As far as I can tell, you can't turn quotas on or off on zfs
error = 0;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_QUOTAOFF:
error = ENOTSUP;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_SETQUOTA:
error = copyin(arg, &dqblk, sizeof (dqblk));
if (error == 0)
error = zfs_set_userquota(zfsvfs, quota_type,
"", id, dbtob(dqblk.dqb_bhardlimit));
break;
case Q_GETQUOTA:
error = zfs_getquota(zfsvfs, id, type == GRPQUOTA, &dqblk);
if (error == 0)
error = copyout(&dqblk, arg, sizeof (dqblk));
break;
default:
error = EINVAL;
break;
}
done:
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
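From userland, the handler above is reached through the standard quotactl(2) interface. A hypothetical example querying the caller's user quota on a ZFS mount follows; the mount path /tank/home is illustrative.

/* Illustrative only; not part of this diff. */
#include <sys/types.h>
#include <ufs/ufs/quota.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct dqblk dq;

	if (quotactl("/tank/home", QCMD(Q_GETQUOTA, USRQUOTA),
	    getuid(), &dq) == -1) {
		perror("quotactl");
		return (1);
	}
	/* zfs_getquota() fills both limits from the same ZFS quota value. */
	printf("used %ju blocks, hard limit %ju\n",
	    (uintmax_t)dq.dqb_curblocks, (uintmax_t)dq.dqb_bhardlimit);
	return (0);
}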
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY));
}
static int
zfs_sync(vfs_t *vfsp, int waitfor)
{
/*
* Data integrity is job one. We don't want a compromised kernel
* writing to the storage pool, so we never sync during panic.
*/
if (panicstr)
return (0);
/*
* Ignore the system syncher. ZFS already commits async data
* at zfs_txg_timeout intervals.
*/
if (waitfor == MNT_LAZY)
return (0);
if (vfsp != NULL) {
/*
* Sync a specific filesystem.
*/
zfsvfs_t *zfsvfs = vfsp->vfs_data;
dsl_pool_t *dp;
int error;
error = vfs_stdsync(vfsp, waitfor);
if (error != 0)
return (error);
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (rebooting && spa_suspended(dp->dp_spa)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(8). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == TRUE) {
zfsvfs->z_atime = TRUE;
zfsvfs->z_vfs->vfs_flag &= ~MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
} else {
zfsvfs->z_atime = FALSE;
zfsvfs->z_vfs->vfs_flag |= MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
}
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
zfsvfs->z_vfs->mnt_stat.f_iosize = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval) {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
} else {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
}
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
}
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
}
}
/*
* The nbmand mount option can be changed at mount time.
* We can't allow it to be toggled on live file systems, or incorrect
* behavior may be seen from CIFS clients.
*
* This property isn't registered via dsl_prop_register(), but this callback
* will be called when a file system is first mounted.
*/
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
} else {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
}
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_inherit = newval;
}
static void
acl_type_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_type = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
uint64_t nbmand;
boolean_t readonly = B_FALSE;
boolean_t do_readonly = B_FALSE;
boolean_t setuid = B_FALSE;
boolean_t do_setuid = B_FALSE;
boolean_t exec = B_FALSE;
boolean_t do_exec = B_FALSE;
boolean_t xattr = B_FALSE;
boolean_t atime = B_FALSE;
boolean_t do_atime = B_FALSE;
boolean_t do_xattr = B_FALSE;
int error = 0;
ASSERT3P(vfsp, !=, NULL);
zfsvfs = vfsp->vfs_data;
ASSERT3P(zfsvfs, !=, NULL);
os = zfsvfs->z_os;
/*
* This function can be called for a snapshot when we update the snapshot's
* mount point, which isn't really supported.
*/
if (dmu_objset_is_snapshot(os))
return (EOPNOTSUPP);
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
!spa_writeable(dmu_objset_spa(os))) {
readonly = B_TRUE;
do_readonly = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
readonly = B_FALSE;
do_readonly = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
setuid = B_FALSE;
do_setuid = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
setuid = B_TRUE;
do_setuid = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
exec = B_FALSE;
do_exec = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
exec = B_TRUE;
do_exec = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_OFF;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_DIRXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SAXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_SA;
do_xattr = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
atime = B_FALSE;
do_atime = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
atime = B_TRUE;
do_atime = B_TRUE;
}
/*
* We need to enter pool configuration here, so that we can use
* dsl_prop_get_int_ds() to handle the special nbmand property below.
* dsl_prop_get_integer() can not be used, because it has to acquire
* spa_namespace_lock and we can not do that because we already hold
* z_teardown_lock. The problem is that spa_write_cachefile() is called
* with spa_namespace_lock held and the function calls ZFS vnode
* operations to write the cache file and thus z_teardown_lock is
* acquired after spa_namespace_lock.
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
/*
* nbmand is a special property. It can only be changed at
* mount time.
*
* This is weird, but it is documented to only be changeable
* at mount time.
*/
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
nbmand = B_FALSE;
} else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
nbmand = B_TRUE;
} else if ((error = dsl_prop_get_int_ds(ds, "nbmand", &nbmand) != 0)) {
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
return (error);
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acl_type_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (do_readonly)
readonly_changed_cb(zfsvfs, readonly);
if (do_setuid)
setuid_changed_cb(zfsvfs, setuid);
if (do_exec)
exec_changed_cb(zfsvfs, exec);
if (do_xattr)
xattr_changed_cb(zfsvfs, xattr);
if (do_atime)
atime_changed_cb(zfsvfs, atime);
nbmand_changed_cb(zfsvfs, nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printf("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val);
if (error != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if (error == 0 && val == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
}
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT3U(zfsvfs->z_root, !=, 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
/*
* Only use the name cache if we are looking for a
* name on a file system that does not require normalization
* or case folding. We can also look there if we happen to be
* on a non-normalizing, mixed sensitivity file system IF we
* are looking for the exact name (which is always the case on
* FreeBSD).
*/
zfsvfs->z_use_namecache = !zfsvfs->z_norm ||
((zfsvfs->z_case == ZFS_CASE_MIXED) &&
!(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER));
return (0);
}
taskq_t *zfsvfs_taskq;
static void
zfsvfs_task_unlinked_drain(void *context, int pending __unused)
{
zfs_unlinked_drain((zfsvfs_t *)context);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
/*
* XXX: Fix struct statfs so this isn't necessary!
*
* The 'osname' is used as the filesystem's special node, which means
* it must fit in statfs.f_mntfromname, or else it can't be
* enumerated, so libzfs_mnttab_find() returns NULL, which causes
* 'zfs unmount' to think it's not mounted when it is.
*/
if (strlen(osname) >= MNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs,
&os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
TASK_INIT(&zfsvfs->z_unlinked_drain_task, 0,
zfsvfs_task_unlinked_drain, zfsvfs);
ZFS_TEARDOWN_INIT(zfsvfs);
ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
for (int i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
dmu_objset_incompatible_encryption_version(zfsvfs->z_os))
return (SET_ERROR(EROFS));
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
boolean_t readonly;
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
if (readonly != 0) {
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
} else {
dsl_dir_t *dd;
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
(u_longlong_t)zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
boolean_t use_nc = zfsvfs->z_use_namecache;
zfsvfs->z_use_namecache = B_FALSE;
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
zfsvfs->z_use_namecache = use_nc;
}
}
/* restore readonly bit */
if (readonly != 0)
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
ASSERT3U(zfsvfs->z_nr_znodes, ==, 0);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_destroy(&zfsvfs->z_hold_mtx[i]);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
- if (zfsvfs->z_vfs) {
- if (zfsvfs->z_use_fuids) {
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
- vfs_set_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
- } else {
- vfs_clear_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
- vfs_clear_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
- vfs_clear_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
- vfs_clear_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
- vfs_clear_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
- vfs_clear_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
- }
- }
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static int
zfs_domount(vfs_t *vfsp, char *osname)
{
uint64_t recordsize, fsid_guid;
int error = 0;
zfsvfs_t *zfsvfs;
ASSERT3P(vfsp, !=, NULL);
ASSERT3P(osname, !=, NULL);
error = zfsvfs_create(osname, vfsp->mnt_flag & MNT_RDONLY, &zfsvfs);
if (error)
return (error);
zfsvfs->z_vfs = vfsp;
if ((error = dsl_prop_get_integer(osname,
"recordsize", &recordsize, NULL)))
goto out;
zfsvfs->z_vfs->vfs_bsize = SPA_MINBLOCKSIZE;
zfsvfs->z_vfs->mnt_stat.f_iosize = recordsize;
vfsp->vfs_data = zfsvfs;
vfsp->mnt_flag |= MNT_LOCAL;
vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;
vfsp->mnt_kern_flag |= MNTK_EXTENDED_SHARED;
/*
* This can cause a loss of coherence between ARC and page cache
* on ZoF; it is unclear whether the problem is in FreeBSD or ZoF.
*/
vfsp->mnt_kern_flag |= MNTK_NO_IOPF; /* vn_io_fault can be used */
vfsp->mnt_kern_flag |= MNTK_NOMSYNC;
vfsp->mnt_kern_flag |= MNTK_VMSETSIZE_BUG;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
vfsp->mnt_kern_flag |= MNTK_FPLOOKUP;
#endif
/*
* The fsid is 64 bits, composed of an 8-bit fs type, which
* separates our fsid from any other filesystem types, and a
* 56-bit objset unique ID. The objset unique ID is unique to
* all objsets open on this system, provided by unique_create().
* The 8-bit fs type must be put in the low bits of fsid[1]
* because that's where other Solaris filesystems put it.
*/
fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
ASSERT3U((fsid_guid & ~((1ULL << 56) - 1)), ==, 0);
vfsp->vfs_fsid.val[0] = fsid_guid;
vfsp->vfs_fsid.val[1] = ((fsid_guid >> 32) << 8) |
(vfsp->mnt_vfc->vfc_typenum & 0xFF);
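/*
 * Worked example (illustrative, not from the source): with a unique
 * objset ID of 0x0000123456789abc and vfc_typenum 0x2b, the code above
 * yields val[0] = 0x56789abc and
 * val[1] = ((0x00001234 << 8) | 0x2b) = 0x0012342b, i.e. the 8-bit
 * fs type ends up in the low byte of val[1].
 */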
/*
* Set features for file system.
*/
zfs_set_fuid_feature(zfsvfs);
- if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
- vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
- vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
- vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
- } else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
- vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
- vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
- }
- vfs_set_feature(vfsp, VFSFT_ZEROCOPY_SUPPORTED);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acl_type_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
vfs_mountedfrom(vfsp, osname);
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
out:
if (error) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
} else {
atomic_inc_32(&zfs_active_fs_count);
}
return (error);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
static int
getpoolname(const char *osname, char *poolname)
{
char *p;
p = strchr(osname, '/');
if (p == NULL) {
if (strlen(osname) >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strcpy(poolname, osname);
} else {
if (p - osname >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strncpy(poolname, osname, p - osname);
poolname[p - osname] = '\0';
}
return (0);
}
static void
fetch_osname_options(char *name, bool *checkpointrewind)
{
if (name[0] == '!') {
*checkpointrewind = true;
memmove(name, name + 1, strlen(name));
} else {
*checkpointrewind = false;
}
}
static int
zfs_mount(vfs_t *vfsp)
{
kthread_t *td = curthread;
vnode_t *mvp = vfsp->mnt_vnodecovered;
cred_t *cr = td->td_ucred;
char *osname;
int error = 0;
int canwrite;
bool checkpointrewind;
if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
return (SET_ERROR(EINVAL));
/*
* If full-owner-access is enabled and delegated administration is
* turned on, we must set nosuid.
*/
if (zfs_super_owner &&
dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != ECANCELED) {
secpolicy_fs_mount_clearopts(cr, vfsp);
}
fetch_osname_options(osname, &checkpointrewind);
/*
* Check for mount privilege?
*
* If we don't have privilege then see if
* we have local permission to allow it
*/
error = secpolicy_fs_mount(cr, mvp, vfsp);
if (error) {
if (dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != 0)
goto out;
if (!(vfsp->vfs_flag & MS_REMOUNT)) {
vattr_t vattr;
/*
* Make sure user is the owner of the mount point
* or has sufficient privileges.
*/
vattr.va_mask = AT_UID;
vn_lock(mvp, LK_SHARED | LK_RETRY);
if (VOP_GETATTR(mvp, &vattr, cr)) {
VOP_UNLOCK1(mvp);
goto out;
}
if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
VOP_UNLOCK1(mvp);
goto out;
}
VOP_UNLOCK1(mvp);
}
secpolicy_fs_mount_clearopts(cr, vfsp);
}
/*
* Refuse to mount a filesystem if we are in a local zone and the
* dataset is not visible.
*/
if (!INGLOBALZONE(curproc) &&
(!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
error = SET_ERROR(EPERM);
goto out;
}
vfsp->vfs_flag |= MNT_NFS4ACLS;
/*
* When doing a remount, we simply refresh our temporary properties
* according to those options set in the current VFS options.
*/
if (vfsp->vfs_flag & MS_REMOUNT) {
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* Refresh mount options with z_teardown_lock blocking I/O while
* the filesystem is in an inconsistent state.
* The lock also serializes this code with filesystem
* manipulations between entry to zfs_suspend_fs() and return
* from zfs_resume_fs().
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfs_unregister_callbacks(zfsvfs);
error = zfs_register_callbacks(vfsp);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
goto out;
}
/* Initial root mount: try hard to import the requested root pool. */
if ((vfsp->vfs_flag & MNT_ROOTFS) != 0 &&
(vfsp->vfs_flag & MNT_UPDATE) == 0) {
char pname[MAXNAMELEN];
error = getpoolname(osname, pname);
if (error == 0)
error = spa_import_rootpool(pname, checkpointrewind);
if (error)
goto out;
}
DROP_GIANT();
error = zfs_domount(vfsp, osname);
PICKUP_GIANT();
out:
return (error);
}
static int
zfs_statfs(vfs_t *vfsp, struct statfs *statp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
uint64_t refdbytes, availbytes, usedobjs, availobjs;
+ int error;
statp->f_version = STATFS_VERSION;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
/*
* The underlying storage pool actually uses multiple block sizes.
* We report the fragsize as the smallest block size we support,
* and we report our blocksize as the filesystem's maximum blocksize.
*/
statp->f_bsize = SPA_MINBLOCKSIZE;
statp->f_iosize = zfsvfs->z_vfs->mnt_stat.f_iosize;
/*
* The following report "total" blocks of various kinds in the
* file system, but reported in terms of f_frsize - the
* "fragment" size.
*/
statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
statp->f_bfree = availbytes / statp->f_bsize;
statp->f_bavail = statp->f_bfree; /* no root reservation */
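/*
 * Worked example (illustrative, not part of the patch): with
 * refdbytes = 1 GiB and availbytes = 3 GiB, f_blocks is
 * 4 GiB >> SPA_MINBLOCKSHIFT = 8388608 512-byte fragments and
 * f_bfree is 3 GiB / 512 = 6291456 of them.
 */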
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, statp->f_bfree);
statp->f_files = statp->f_ffree + usedobjs;
/*
* We're a zfs filesystem.
*/
strlcpy(statp->f_fstypename, "zfs",
sizeof (statp->f_fstypename));
strlcpy(statp->f_mntfromname, vfsp->mnt_stat.f_mntfromname,
sizeof (statp->f_mntfromname));
strlcpy(statp->f_mntonname, vfsp->mnt_stat.f_mntonname,
sizeof (statp->f_mntonname));
statp->f_namemax = MAXNAMELEN - 1;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *rootzp;
int error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*vpp = ZTOV(rootzp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (error == 0) {
error = vn_lock(*vpp, flags);
if (error != 0) {
VN_RELE(*vpp);
*vpp = NULL;
}
}
return (error);
}
/*
* Teardown the zfsvfs::z_os.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
dsl_dir_t *dd;
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but zreles run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's vfsp as the parent
* filesystem and all of its snapshots have their vnode's
* v_vfsp set to the parent's filesystem's vfsp. Note,
* 'z_parent' is self referential for non-snapshots.
*/
#ifdef FREEBSD_NAMECACHE
#if __FreeBSD_version >= 1300117
cache_purgevfs(zfsvfs->z_parent->z_vfs);
#else
cache_purgevfs(zfsvfs->z_parent->z_vfs, true);
#endif
#endif
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs);
/*
* If we are not unmounting (i.e., online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no vops active, and any new vops will
* fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl != NULL) {
zfs_znode_dmu_fini(zp);
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
/*
* If we are unmounting, set the unmounted flag and let new vops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other vops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data
*/
if (!zfs_is_readonly(zfsvfs))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
dmu_objset_evict_dbufs(zfsvfs->z_os);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
static int
zfs_umount(vfs_t *vfsp, int fflag)
{
kthread_t *td = curthread;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
objset_t *os;
cred_t *cr = td->td_ucred;
int ret;
ret = secpolicy_fs_unmount(cr, vfsp);
if (ret) {
if (dsl_deleg_access((char *)vfsp->vfs_resource,
ZFS_DELEG_PERM_MOUNT, cr))
return (ret);
}
/*
* Unmount any snapshots mounted under .zfs before unmounting the
* dataset itself.
*/
if (zfsvfs->z_ctldir != NULL) {
if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
return (ret);
}
if (fflag & MS_FORCE) {
/*
* Mark file system as unmounted before calling
* vflush(FORCECLOSE). This way we ensure no future vnops
* will be called and risk operating on DOOMED vnodes.
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* Flush all the files.
*/
ret = vflush(vfsp, 0, (fflag & MS_FORCE) ? FORCECLOSE : 0, td);
if (ret != 0)
return (ret);
while (taskqueue_cancel(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task, NULL) != 0)
taskqueue_drain(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task);
VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
os = zfsvfs->z_os;
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
/*
* We can now safely destroy the '.zfs' directory node.
*/
if (zfsvfs->z_ctldir != NULL)
zfsctl_destroy(zfsvfs);
zfs_freevfs(vfsp);
return (0);
}
static int
zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
int err;
/*
* zfs_zget() can't operate on virtual entries like .zfs/ or
* .zfs/snapshot/ directories, which is why we return EOPNOTSUPP.
* This will make NFS switch to LOOKUP instead of using VGET.
*/
if (ino == ZFSCTL_INO_ROOT || ino == ZFSCTL_INO_SNAPDIR ||
(zfsvfs->z_shares_dir != 0 && ino == zfsvfs->z_shares_dir))
return (EOPNOTSUPP);
- ZFS_ENTER(zfsvfs);
+ if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (err);
err = zfs_zget(zfsvfs, ino, &zp);
if (err == 0 && zp->z_unlinked) {
vrele(ZTOV(zp));
err = EINVAL;
}
if (err == 0)
*vpp = ZTOV(zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (err == 0) {
err = vn_lock(*vpp, flags);
if (err != 0)
vrele(*vpp);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
#if __FreeBSD_version >= 1300098
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors)
#else
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* If this is a regular file system, vfsp is the same as
* zfsvfs->z_parent->z_vfs; but if it is a snapshot,
* zfsvfs->z_parent->z_vfs represents the parent file system,
* which we have to use here because only that file system
* has mnt_export configured.
*/
return (vfs_stdcheckexp(zfsvfs->z_parent->z_vfs, nam, extflagsp,
credanonp, numsecflavors, secflavors));
}
_Static_assert(sizeof (struct fid) >= SHORT_FID_LEN,
"struct fid bigger than SHORT_FID_LEN");
_Static_assert(sizeof (struct fid) >= LONG_FID_LEN,
"struct fid bigger than LONG_FID_LEN");
static int
zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
{
struct componentname cn;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
vnode_t *dvp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t setgen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*vpp = NULL;
- ZFS_ENTER(zfsvfs);
+ if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (err);
/*
* On FreeBSD we can get a snapshot's mount point or its parent file
* system's mount point, depending on whether the snapshot is already mounted.
*/
if (zfsvfs->z_parent == zfsvfs && fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
if (err)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
+ if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (err);
}
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
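/*
 * Decoding note (illustrative, not part of the patch): zf_object and
 * zf_gen are stored as little-endian byte arrays in the fid, so e.g.
 * zf_object bytes { 0x2a, 0x01, 0x00, ... } decode to object number
 * 0x12a (298) in the loops above.
 */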
if (fidp->fid_len == LONG_FID_LEN && (fid_gen > 1 || setgen != 0)) {
dprintf("snapdir fid: fid_gen (%llu) and setgen (%llu)\n",
(u_longlong_t)fid_gen, (u_longlong_t)setgen);
return (SET_ERROR(EINVAL));
}
/*
* A zero fid_gen means we are in .zfs or the .zfs/snapshot
* directory tree. If the object == zfsvfs->z_shares_dir, then
* we are in the .zfs/shares directory tree.
*/
if ((fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) ||
(zfsvfs->z_shares_dir != 0 && object == zfsvfs->z_shares_dir)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
VERIFY0(zfsctl_root(zfsvfs, LK_SHARED, &dvp));
if (object == ZFSCTL_INO_SNAPDIR) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN | LOCKLEAF;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else if (object == zfsvfs->z_shares_dir) {
/*
* XXX This branch must not be taken,
* if it is, then the lookup below will
* explode.
*/
cn.cn_nameptr = "shares";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else {
*vpp = dvp;
}
return (err);
}
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", (u_longlong_t)object,
(u_longlong_t)fid_gen,
(u_longlong_t)gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n",
(u_longlong_t)zp_gen, (u_longlong_t)fid_gen);
vrele(ZTOV(zp));
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
*vpp = ZTOV(zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
err = vn_lock(*vpp, flags);
if (err == 0)
vnode_create_vobject(*vpp, zp->z_size, curthread);
else
*vpp = NULL;
return (err);
}
/*
* Block out VOPs and close zfsvfs_t::z_os
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
zfs_set_fuid_feature(zfsvfs);
/*
* Attempt to re-establish all the active znodes with
* their dbufs. If a zfs_rezget() fails, then we'll let
- * any potential callers discover that via ZFS_ENTER_VERIFY_VP
+ * any potential callers discover that via zfs_enter_verify_zp
* when they try to use their znode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
(void) zfs_rezget(zp);
}
mutex_exit(&zfsvfs->z_znodes_lock);
bail:
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0) {
vfs_ref(zfsvfs->z_vfs);
(void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
}
}
return (err);
}
static void
zfs_freevfs(vfs_t *vfsp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
zfsvfs_free(zfsvfs);
atomic_dec_32(&zfs_active_fs_count);
}
#ifdef __i386__
static int desiredvnodes_backup;
#include <sys/vmmeter.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#endif
static void
zfs_vnodes_adjust(void)
{
#ifdef __i386__
int newdesiredvnodes;
desiredvnodes_backup = desiredvnodes;
/*
* We calculate newdesiredvnodes the same way it is done in
* vntblinit(). If it is equal to desiredvnodes, it means that
* it wasn't tuned by the administrator and we can tune it down.
*/
newdesiredvnodes = min(maxproc + vm_cnt.v_page_count / 4, 2 *
vm_kmem_size / (5 * (sizeof (struct vm_object) +
sizeof (struct vnode))));
if (newdesiredvnodes == desiredvnodes)
desiredvnodes = (3 * newdesiredvnodes) / 4;
#endif
}
static void
zfs_vnodes_adjust_back(void)
{
#ifdef __i386__
desiredvnodes = desiredvnodes_backup;
#endif
}
void
zfs_init(void)
{
printf("ZFS filesystem version: " ZPL_VERSION_STRING "\n");
/*
* Initialize .zfs directory structures
*/
zfsctl_init();
/*
* Initialize znode cache, vnode ops, etc...
*/
zfs_znode_init();
/*
* Reduce the number of vnodes. Originally the number of vnodes is
* calculated with UFS inodes in mind. We reduce it here because it's
* too big for ZFS/i386.
*/
zfs_vnodes_adjust();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
zfsvfs_taskq = taskq_create("zfsvfs", 1, minclsyspri, 0, 0, 0);
}
void
zfs_fini(void)
{
taskq_destroy(zfsvfs_taskq);
zfsctl_fini();
zfs_znode_fini();
zfs_vnodes_adjust_back();
}
int
zfs_busy(void)
{
return (zfs_active_fs_count != 0);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_vfs, 0);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY0(sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %ju to %ju", (uintmax_t)zfsvfs->z_version,
(uintmax_t)newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Read a property stored within the master node.
*/
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
uint64_t *cached_copy = NULL;
/*
* Figure out where in the objset_t the cached copy would live, if it
* is available for the requested property.
*/
if (os != NULL) {
switch (prop) {
case ZFS_PROP_VERSION:
cached_copy = &os->os_version;
break;
case ZFS_PROP_NORMALIZE:
cached_copy = &os->os_normalization;
break;
case ZFS_PROP_UTF8ONLY:
cached_copy = &os->os_utf8only;
break;
case ZFS_PROP_CASE:
cached_copy = &os->os_casesensitivity;
break;
default:
break;
}
}
if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
*value = *cached_copy;
return (0);
}
/*
* If the property wasn't cached, look up the file system's value for
* the property. For the version property, we look up a slightly
* different string.
*/
const char *pname;
int error = ENOENT;
if (prop == ZFS_PROP_VERSION) {
pname = ZPL_VERSION_STR;
} else {
pname = zfs_prop_to_name(prop);
}
if (os != NULL) {
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
}
if (error == ENOENT) {
/* No value set, use the default value */
switch (prop) {
case ZFS_PROP_VERSION:
*value = ZPL_VERSION;
break;
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
*value = 0;
break;
case ZFS_PROP_CASE:
*value = ZFS_CASE_SENSITIVE;
break;
case ZFS_PROP_ACLTYPE:
*value = ZFS_ACLTYPE_NFSV4;
break;
default:
return (error);
}
error = 0;
}
/*
* If one of the methods for getting the property value above worked,
* copy it into the objset_t's cache.
*/
if (error == 0 && cached_copy != NULL) {
*cached_copy = *value;
}
return (error);
}
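/*
 * Usage sketch (illustrative, not part of the patch): callers typically read
 * master-node properties such as the ZPL version like this; a NULL objset
 * falls through to the compiled-in defaults (e.g. ZPL_VERSION):
 *
 *	uint64_t zplver;
 *	if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplver) == 0)
 *		dprintf("ZPL version %llu\n", (u_longlong_t)zplver);
 */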
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT3U(dmu_objset_type(os), ==, DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_vfs != NULL &&
(zfvp->z_vfs->mnt_kern_flag & MNTK_UNMOUNT))
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
#ifdef _KERNEL
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
char tmpbuf[MAXPATHLEN];
struct mount *mp;
char *fromname;
size_t oldlen;
oldlen = strlen(oldname);
mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
fromname = mp->mnt_stat.f_mntfromname;
if (strcmp(fromname, oldname) == 0) {
(void) strlcpy(fromname, newname,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
if (strncmp(fromname, oldname, oldlen) == 0 &&
(fromname[oldlen] == '/' || fromname[oldlen] == '@')) {
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%s",
newname, fromname + oldlen);
(void) strlcpy(fromname, tmpbuf,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
}
mtx_unlock(&mountlist_mtx);
}
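/*
 * Example (illustrative, not part of the patch): after renaming "tank/old"
 * to "tank/new", a mount whose f_mntfromname was "tank/old/child" is
 * rewritten to "tank/new/child", while "tank/oldish" is left untouched
 * because the character following the matched prefix is neither '/' nor '@'.
 */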
#endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index bb61b0162985..fae390a148d6 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1,6354 +1,6324 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/endian.h>
#include <sys/vm.h>
#include <sys/vnode.h>
#if __FreeBSD_version >= 1300102
#include <sys/smr.h>
#endif
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/atomic.h>
#include <sys/namei.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/kdb.h>
#include <sys/sysproto.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
-#include <sys/extdirent.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sched.h>
#include <sys/acl.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
#include <sys/zil.h>
#include <sys/zfs_vnops.h>
#include <vm/vm_object.h>
#include <sys/extattr.h>
#include <sys/priv.h>
#ifndef VN_OPEN_INVFS
#define VN_OPEN_INVFS 0x0
#endif
VFS_SMR_DECLARE;
#if __FreeBSD_version < 1300103
#define NDFREE_PNBUF(ndp) NDFREE((ndp), NDF_ONLY_PNBUF)
#endif
#if __FreeBSD_version >= 1300047
#define vm_page_wire_lock(pp)
#define vm_page_wire_unlock(pp)
#else
#define vm_page_wire_lock(pp) vm_page_lock(pp)
#define vm_page_wire_unlock(pp) vm_page_unlock(pp)
#endif
#ifdef DEBUG_VFS_LOCKS
#define VNCHECKREF(vp) \
VNASSERT((vp)->v_holdcnt > 0 && (vp)->v_usecount > 0, vp, \
("%s: wrong ref counts", __func__));
#else
#define VNCHECKREF(vp)
#endif
#if __FreeBSD_version >= 1400045
typedef uint64_t cookie_t;
#else
typedef ulong_t cookie_t;
#endif
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
- * This is done avoiding races using ZFS_ENTER(zfsvfs).
- * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
- * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
+ * This is done avoiding races using zfs_enter(zfsvfs).
+ * A zfs_exit(zfsvfs) is needed before all returns. Any znodes
+ * must be checked with zfs_verify_zp(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) VN_RELE() should always be the last thing except for zil_commit()
- * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
+ * (if necessary) and zfs_exit(). This is for 3 reasons:
* First, if it's the last reference, the vnode/znode
* can be freed, so the zp may point to freed memory. Second, the last
* reference will call zfs_zinactive(), which may induce a lot of work --
* pushing cached pages (which acquires range locks) and syncing out
* cached atime changes. Third, zfs_zinactive() may require a new tx,
* which could deadlock the system if you were already holding one.
* If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
- * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
+ * If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
- * ZFS_ENTER(zfsvfs); // exit if unmounted
+ * zfs_enter(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lookup(&dl, ...) // lock directory entry (may VN_HOLD())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
- * ZFS_EXIT(zfsvfs); // finished in zfs
+ * zfs_exit(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* zil_commit(zilog, foid); // synchronous when necessary
- * ZFS_EXIT(zfsvfs); // finished in zfs
+ * zfs_exit(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr)
{
(void) cr;
znode_t *zp = VTOZ(*vpp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & FAPPEND) == 0)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/* Keep a count of the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_inc_32(&zp->z_sync_cnt);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
{
(void) offset, (void) cr;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
/* Decrement the synchronous opens in the znode */
if ((flag & O_SYNC) && (count == 1))
atomic_dec_32(&zp->z_sync_cnt);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
int *rvalp)
{
(void) flag, (void) cred, (void) rvalp;
loff_t off;
int error;
switch (com) {
case _FIOFFS:
{
return (0);
/*
* The following two ioctls are used by bfu. Faking them out is
* necessary to avoid bfu errors.
*/
}
case _FIOGDIO:
case _FIOSDIO:
{
return (0);
}
case F_SEEK_DATA:
case F_SEEK_HOLE:
{
off = *(offset_t *)data;
/* offset parameter is in/out */
error = zfs_holey(VTOZ(vp), com, &off);
if (error)
return (error);
*(offset_t *)data = off;
return (0);
}
}
return (SET_ERROR(ENOTTY));
}
static vm_page_t
page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
{
vm_object_t obj;
vm_page_t pp;
int64_t end;
/*
* At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
* aligned boundaries, if the range is not aligned. As a result a
* DEV_BSIZE subrange with partially dirty data may get marked as clean.
* It may happen that all DEV_BSIZE subranges are marked clean and thus
* the whole page would be considered clean despite having some
* dirty data.
* For this reason we should shrink the range to DEV_BSIZE aligned
* boundaries before calling vm_page_clear_dirty.
*/
end = rounddown2(off + nbytes, DEV_BSIZE);
off = roundup2(off, DEV_BSIZE);
nbytes = end - off;
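/*
 * Worked example (illustrative, not part of the patch): with
 * DEV_BSIZE = 512, off = 100 and nbytes = 1000, end becomes
 * rounddown2(1100, 512) = 1024, off becomes roundup2(100, 512) = 512,
 * and nbytes shrinks to 512, so only fully covered 512-byte subranges
 * are cleared below.
 */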
obj = vp->v_object;
zfs_vmobject_assert_wlocked_12(obj);
#if __FreeBSD_version < 1300050
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
vm_page_sbusy(pp);
} else if (pp != NULL) {
ASSERT(!pp->valid);
pp = NULL;
}
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
break;
}
#else
vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
VM_ALLOC_IGN_SBUSY);
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
#endif
return (pp);
}
static void
page_unbusy(vm_page_t pp)
{
vm_page_sunbusy(pp);
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(pp->object);
#else
vm_object_pip_subtract(pp->object, 1);
#endif
}
#if __FreeBSD_version > 1300051
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t m;
obj = vp->v_object;
vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
VM_ALLOC_NOBUSY);
return (m);
}
#else
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t pp;
obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_wire_lock(pp);
vm_page_hold(pp);
vm_page_wire_unlock(pp);
} else
pp = NULL;
break;
}
return (pp);
}
#endif
static void
page_unhold(vm_page_t pp)
{
vm_page_wire_lock(pp);
#if __FreeBSD_version >= 1300035
vm_page_unwire(pp, PQ_ACTIVE);
#else
vm_page_unhold(pp);
#endif
vm_page_wire_unlock(pp);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
vm_object_t obj;
struct sf_buf *sf;
vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300041
vm_object_pip_add(obj, 1);
#endif
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unbusy(pp);
}
len -= nbytes;
off = 0;
}
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(obj);
#else
vm_object_pip_wakeupn(obj, 0);
#endif
zfs_vmobject_wunlock_12(obj);
}
/*
* Read with UIO_NOCOPY flag means that sendfile(2) requests
* ZFS to populate a range of page cache pages with data.
*
* NOTE: this function could be optimized to pre-allocate
* all pages in advance, drain exclusive busy on all of them,
* map them into contiguous KVA region and populate them
* in one single dmu_read() call.
*/
int
mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
vm_page_t pp;
int64_t start;
caddr_t va;
int len = nbytes;
int error = 0;
ASSERT3U(zfs_uio_segflg(uio), ==, UIO_NOCOPY);
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
zfs_vmobject_wlock_12(obj);
for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
if (vm_page_none_valid(pp)) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
memset(va + bytes, 0, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
if (error == 0) {
vm_page_valid(pp);
vm_page_activate(pp);
vm_page_do_sunbusy(pp);
} else {
zfs_vmobject_wlock(obj);
if (!vm_page_wired(pp) && pp->valid == 0 &&
vm_page_busy_tryupgrade(pp))
vm_page_free(pp);
else
vm_page_sunbusy(pp);
zfs_vmobject_wunlock(obj);
}
#else
vm_page_do_sunbusy(pp);
vm_page_lock(pp);
if (error) {
if (pp->wire_count == 0 && pp->valid == 0 &&
!vm_page_busied(pp))
vm_page_free(pp);
} else {
pp->valid = VM_PAGE_BITS_ALL;
vm_page_activate(pp);
}
vm_page_unlock(pp);
#endif
} else {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(pp);
}
if (error)
break;
zfs_uio_advance(uio, bytes);
len -= bytes;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* else we default from the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
int off;
int error = 0;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
start = zfs_uio_offset(uio);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
if ((pp = page_hold(vp, start))) {
struct sf_buf *sf;
caddr_t va;
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = vn_io_fault_uiomove(va + off, bytes,
GET_UIO_STRUCT(uio));
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unhold(pp);
} else {
zfs_vmobject_wunlock_12(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
zfs_vmobject_wlock_12(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
{
int error = 0;
ssize_t resid;
error = vn_rdwr(UIO_WRITE, ZTOV(zp), __DECONST(void *, data), len, pos,
UIO_SYSSPACE, IO_SYNC, kcred, NOCRED, &resid, curthread);
if (error) {
return (SET_ERROR(error));
} else if (presid == NULL) {
if (resid != 0) {
error = SET_ERROR(EIO);
}
} else {
*presid = resid;
}
return (error);
}
void
zfs_zrele_async(znode_t *zp)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = ITOZSB(vp)->z_os;
VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
}
static int
zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
{
int error;
*vpp = arg;
error = vn_lock(*vpp, lkflags);
if (error != 0)
vrele(*vpp);
return (error);
}
static int
zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
{
znode_t *zdp = VTOZ(dvp);
zfsvfs_t *zfsvfs __unused = zdp->z_zfsvfs;
int error;
int ltype;
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(dvp, __func__);
if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
ASSERT3P(dvp, ==, vp);
vref(dvp);
ltype = lkflags & LK_TYPE_MASK;
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/*
* Relock for the "." case could leave us with
* reclaimed vnode.
*/
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
/*
* Note that in this case, dvp is the child vnode, and we
* are looking up the parent vnode - exactly reverse from
* normal operation. Unlocking dvp requires some rather
* tricky unlock/relock dance to prevent mp from being freed;
* use vn_vget_ino_gen() which takes care of all that.
*
* XXX Note that there is a time window when both vnodes are
* unlocked. It is possible, although highly unlikely, that
* during that window the parent-child relationship between
* the vnodes may change, for example, get reversed.
* In that case we would have a wrong lock order for the vnodes.
* All other filesystems seem to ignore this problem, so we
* do the same here.
* A potential solution could be implemented as follows:
* - using LK_NOWAIT when locking the second vnode and retrying
* if necessary
* - checking that the parent-child relationship still holds
* after locking both vnodes and retrying if it doesn't
*/
error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
return (error);
} else {
error = vn_lock(vp, lkflags);
if (error != 0)
vrele(vp);
return (error);
}
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held vnode reference for it.
*
* IN: dvp - vnode of directory to search.
* nm - name of entry to lookup.
* pnp - full pathname to lookup [UNUSED].
* flags - LOOKUP_XATTR set if looking for an attribute.
* rdir - root directory vnode [UNUSED].
* cr - credentials of caller.
* ct - caller context
*
* OUT: vpp - vnode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
static int
zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
struct componentname *cnp, int nameiop, cred_t *cr, int flags,
boolean_t cached)
{
znode_t *zdp = VTOZ(dvp);
znode_t *zp;
zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
#if __FreeBSD_version > 1300124
seqc_t dvp_seqc;
#endif
int error = 0;
/*
* Fast path lookup; however, we must skip DNLC lookup
* for case folding or normalizing lookups because the
* DNLC code only stores the passed in name. This means
* creating 'a' and removing 'A' on a case insensitive
* file system would work, but DNLC still thinks 'a'
* exists and won't let you create it again on the next
* pass through fast path.
*/
if (!(flags & LOOKUP_XATTR)) {
if (dvp->v_type != VDIR) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
}
DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp,
const char *, nm);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zdp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
+ return (error);
#if __FreeBSD_version > 1300124
dvp_seqc = vn_seqc_read_notmodify(dvp);
#endif
*vpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* If the xattr property is off, refuse the lookup request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(VTOZ(dvp), &zp, cr, flags))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
*vpp = ZTOV(zp);
/*
* Do we have permission to get into attribute directory?
*/
error = zfs_zaccess(zp, ACE_EXECUTE, 0, B_FALSE, cr);
if (error) {
vrele(ZTOV(zp));
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Check accessibility of directory if we're not coming in via
* VOP_CACHEDLOOKUP.
*/
if (!cached) {
#ifdef NOEXECCHECK
if ((cnp->cn_flags & NOEXECCHECK) != 0) {
cnp->cn_flags &= ~NOEXECCHECK;
} else
#endif
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
/*
* First handle the special cases.
*/
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* If we are a snapshot mounted under .zfs, return
* the vp for the snapshot directory.
*/
if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
struct componentname cn;
vnode_t *zfsctl_vp;
int ltype;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK1(dvp);
error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
&zfsctl_vp);
if (error == 0) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = cnp->cn_nameiop;
cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
cn.cn_lkflags = cnp->cn_lkflags;
error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
vput(zfsctl_vp);
}
vn_lock(dvp, ltype | LK_RETRY);
return (error);
}
}
if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
return (error);
}
/*
* The loop retries the lookup if the parent-child relationship
* changes during the dot-dot locking complexities.
*/
for (;;) {
uint64_t parent;
error = zfs_dirlook(zdp, nm, &zp);
if (error == 0)
*vpp = ZTOV(zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (error != 0)
break;
error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
if (error != 0) {
/*
* If we've got a locking error, then the vnode
* got reclaimed because of a force unmount.
* We never enter doomed vnodes into the name cache.
*/
*vpp = NULL;
return (error);
}
if ((cnp->cn_flags & ISDOTDOT) == 0)
break;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0) {
+ vput(ZTOV(zp));
+ *vpp = NULL;
+ return (error);
+ }
if (zdp->z_sa_hdl == NULL) {
error = SET_ERROR(EIO);
} else {
error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent));
}
if (error != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
vput(ZTOV(zp));
break;
}
if (zp->z_id == parent) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
break;
}
vput(ZTOV(zp));
}
if (error != 0)
*vpp = NULL;
/* Translate errors and add SAVENAME when needed. */
if (cnp->cn_flags & ISLASTCN) {
switch (nameiop) {
case CREATE:
case RENAME:
if (error == ENOENT) {
error = EJUSTRETURN;
#if __FreeBSD_version < 1400068
cnp->cn_flags |= SAVENAME;
#endif
break;
}
zfs_fallthrough;
case DELETE:
#if __FreeBSD_version < 1400068
if (error == 0)
cnp->cn_flags |= SAVENAME;
#endif
break;
}
}
#if __FreeBSD_version > 1300124
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* FIXME: zfs_lookup_lock relocks vnodes and does nothing to
* handle races. In particular different callers may end up
* with different vnodes and will try to add conflicting
* entries to the namecache.
*
* While finding different result may be acceptable in face
* of concurrent modification, adding conflicting entries
* trips over an assert in the namecache.
*
* Ultimately let an entry through once everything settles.
*/
if (!vn_seqc_consistent(dvp, dvp_seqc)) {
cnp->cn_flags &= ~MAKEENTRY;
}
}
#endif
/* Insert name into cache (as non-existent) if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(dvp, NULL, cnp);
/* Insert name into cache if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == 0 && (cnp->cn_flags & MAKEENTRY)) {
if (!(cnp->cn_flags & ISLASTCN) ||
(nameiop != DELETE && nameiop != RENAME)) {
cache_enter(dvp, *vpp, cnp);
}
}
return (error);
}
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the vp of the created or trunc'd file.
*
* IN: dvp - vnode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - large file flag [UNUSED].
* ct - caller context
* vsecp - ACL to be set
*
* OUT: vpp - vnode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated if new entry created
* vp - ctime|mtime always, atime if new
*/
int
zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
(void) excl, (void) mode, (void) flag;
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
objset_t *os;
dmu_tx_t *tx;
int error;
uid_t uid = crgetuid(cr);
gid_t gid = crgetgid(cr);
uint64_t projid = ZFS_DEFAULT_PROJID;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype;
#ifdef DEBUG_VFS_LOCKS
vnode_t *dvp = ZTOV(dzp);
#endif
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure the file system is at the proper version
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || (vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
*zpp = NULL;
if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
vap->va_mode &= ~S_ISVTX;
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
ASSERT3P(zp, ==, NULL);
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) &&
(vap->va_type != VREG)) {
error = SET_ERROR(EINVAL);
goto out;
}
if ((error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
getnewvnode_reserve_();
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
out:
VNCHECKREF(dvp);
if (error == 0) {
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dvp - vnode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime
* vp - ctime (if nlink > 0)
*/
static int
zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t xattr_obj;
uint64_t obj = 0;
dmu_tx_t *tx;
boolean_t unlinked;
uint64_t txtype;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zp = VTOZ(vp);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_verify_zp(zp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
zilog = zfsvfs->z_log;
xattr_obj = 0;
xzp = NULL;
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (vp->v_type == VDIR) {
error = SET_ERROR(EPERM);
goto out;
}
vnevent_remove(vp, dvp, name, ct);
obj = zp->z_id;
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
}
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the vnode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (xzp) {
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
zfs_unlinked_add(zp, tx);
vp->v_vflag |= VV_NOSYNC;
}
/* XXX check changes to linux vnops */
txtype = TX_REMOVE;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (xzp)
vrele(ZTOV(xzp));
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
static int
zfs_lookup_internal(znode_t *dzp, const char *name, vnode_t **vpp,
struct componentname *cnp, int nameiop)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
int error;
cnp->cn_nameptr = __DECONST(char *, name);
cnp->cn_namelen = strlen(name);
cnp->cn_nameiop = nameiop;
cnp->cn_flags = ISLASTCN;
#if __FreeBSD_version < 1400068
cnp->cn_flags |= SAVENAME;
#endif
cnp->cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
cnp->cn_cred = kcred;
#if __FreeBSD_version < 1400037
cnp->cn_thread = curthread;
#endif
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay) {
struct vop_lookup_args a;
a.a_gen.a_desc = &vop_lookup_desc;
a.a_dvp = ZTOV(dzp);
a.a_vpp = vpp;
a.a_cnp = cnp;
error = vfs_cache_lookup(&a);
} else {
error = zfs_lookup(ZTOV(dzp), name, vpp, cnp, nameiop, kcred, 0,
B_FALSE);
}
#ifdef ZFS_DEBUG
if (error) {
printf("got error %d on name %s on op %d\n", error, name,
nameiop);
kdb_backtrace();
}
#endif
return (error);
}
int
zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags)
{
vnode_t *vp;
int error;
struct componentname cn;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_remove_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Create a new directory and insert it into dvp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dvp - vnode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
* vsecp - ACL to be set
*
* OUT: vpp - vnode of created directory.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
* vp - ctime|mtime|atime updated
*/
int
zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp)
{
(void) flags, (void) vsecp;
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t txtype;
dmu_tx_t *tx;
int error;
uid_t uid = crgetuid(cr);
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
ASSERT3U(vap->va_type, ==, VDIR);
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
((vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
NULL, &acl_ids)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST which can cause some applications
* to fail.
*/
*zpp = NULL;
if ((error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW))) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
ASSERT3P(zp, ==, NULL);
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/*
* Now put new name in parent dir.
*/
(void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
{
cache_purge(dvp);
cache_purge(vp);
}
#endif
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dvp - vnode of directory to remove from.
* name - name of directory to be removed.
* cwd - vnode of current working directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
static int
zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
+ if ((error = zfs_verify_zp(zp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
zilog = zfsvfs->z_log;
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
if (vp->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
}
vnevent_rmdir(vp, dvp, name, ct);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
zfs_log_remove(zilog, tx, txtype, dzp, name,
ZFS_NO_OBJECT, B_FALSE);
}
dmu_tx_commit(tx);
cache_vop_rmdir(dvp, vp);
out:
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags)
{
struct componentname cn;
vnode_t *vp;
int error;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_rmdir_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Read as many directory entries as will fit into the provided
* buffer from the given directory cursor position (specified in
* the uio structure).
*
* IN: vp - vnode of directory to read.
* uio - structure supplying read location, range info,
* and return buffer.
* cr - credentials of caller.
* ct - caller context
- * flags - case flags
*
* OUT: uio - updated offset and range, buffer filled.
* eofp - set to true if end-of-file detected.
+ * ncookies- number of entries in cookies
+ * cookies - offsets to directory entries
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*
 * Note that the low 4 bits of the cookie returned by zap are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
static int
zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
int *ncookies, cookie_t **cookies)
{
znode_t *zp = VTOZ(vp);
iovec_t *iovp;
- edirent_t *eodp;
dirent64_t *odp;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
caddr_t outbuf;
size_t bufsize;
zap_cursor_t zc;
zap_attribute_t zap;
uint_t bytes_wanted;
uint64_t offset; /* must be unsigned; checks for < 1 */
uint64_t parent;
int local_eof;
int outcount;
int error;
uint8_t prefetch;
- boolean_t check_sysattrs;
uint8_t type;
int ncooks;
cookie_t *cooks = NULL;
- int flags = 0;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* If we are not given an eof variable,
* use a local one.
*/
if (eofp == NULL)
eofp = &local_eof;
/*
* Check for valid iov_len.
*/
if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Quit if directory has been removed (posix)
*/
if ((*eofp = zp->z_unlinked) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Get space to change directory entries into fs independent format.
*/
iovp = GET_UIO_STRUCT(uio)->uio_iov;
bytes_wanted = iovp->iov_len;
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) {
bufsize = bytes_wanted;
outbuf = kmem_alloc(bufsize, KM_SLEEP);
odp = (struct dirent64 *)outbuf;
} else {
bufsize = bytes_wanted;
outbuf = NULL;
odp = (struct dirent64 *)iovp->iov_base;
}
- eodp = (struct edirent *)odp;
if (ncookies != NULL) {
/*
* Minimum entry size is dirent size and 1 byte for a file name.
*/
ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) -
sizeof (((struct dirent *)NULL)->d_name) + 1);
cooks = malloc(ncooks * sizeof (*cooks), M_TEMP, M_WAITOK);
*cookies = cooks;
*ncookies = ncooks;
}
- /*
- * If this VFS supports the system attribute view interface; and
- * we're looking at an extended attribute directory; and we care
- * about normalization conflicts on this vfs; then we must check
- * for normalization conflicts with the sysattr name space.
- */
-#ifdef TODO
- check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
- (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
- (flags & V_RDDIR_ENTFLAGS);
-#else
- check_sysattrs = 0;
-#endif
/*
* Transform to file-system independent format
*/
outcount = 0;
while (outcount < bytes_wanted) {
ino64_t objnum;
ushort_t reclen;
off64_t *next = NULL;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if ((*eofp = (error == ENOENT)) != 0)
break;
else
goto update;
}
if (zap.za_integer_length != 8 ||
zap.za_num_integers != 1) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
/*
* MacOS X can extract the object type here such as:
* uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
*/
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
-
- if (check_sysattrs && !zap.za_normalization_conflict) {
-#ifdef TODO
- zap.za_normalization_conflict =
- xattr_sysattr_casechk(zap.za_name);
-#else
- panic("%s:%u: TODO", __func__, __LINE__);
-#endif
- }
}
- if (flags & V_RDDIR_ACCFILTER) {
- /*
- * If we have no access at all, don't include
- * this entry in the returned information
- */
- znode_t *ezp;
- if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
- goto skip_entry;
- if (!zfs_has_access(ezp, cr)) {
- vrele(ZTOV(ezp));
- goto skip_entry;
- }
- vrele(ZTOV(ezp));
- }
-
- if (flags & V_RDDIR_ENTFLAGS)
- reclen = EDIRENT_RECLEN(strlen(zap.za_name));
- else
- reclen = DIRENT64_RECLEN(strlen(zap.za_name));
+ reclen = DIRENT64_RECLEN(strlen(zap.za_name));
/*
* Will this entry fit in the buffer?
*/
if (outcount + reclen > bufsize) {
/*
* Did we manage to fit anything in the buffer?
*/
if (!outcount) {
error = SET_ERROR(EINVAL);
goto update;
}
break;
}
- if (flags & V_RDDIR_ENTFLAGS) {
- /*
- * Add extended flag entry:
- */
- eodp->ed_ino = objnum;
- eodp->ed_reclen = reclen;
- /* NOTE: ed_off is the offset for the *next* entry */
- next = &(eodp->ed_off);
- eodp->ed_eflags = zap.za_normalization_conflict ?
- ED_CASE_CONFLICT : 0;
- (void) strncpy(eodp->ed_name, zap.za_name,
- EDIRENT_NAMELEN(reclen));
- eodp = (edirent_t *)((intptr_t)eodp + reclen);
- } else {
- /*
- * Add normal entry:
- */
- odp->d_ino = objnum;
- odp->d_reclen = reclen;
- odp->d_namlen = strlen(zap.za_name);
- /* NOTE: d_off is the offset for the *next* entry. */
- next = &odp->d_off;
- strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
- odp->d_type = type;
- dirent_terminate(odp);
- odp = (dirent64_t *)((intptr_t)odp + reclen);
- }
+ /*
+ * Add normal entry:
+ */
+ odp->d_ino = objnum;
+ odp->d_reclen = reclen;
+ odp->d_namlen = strlen(zap.za_name);
+ /* NOTE: d_off is the offset for the *next* entry. */
+ next = &odp->d_off;
+ strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
+ odp->d_type = type;
+ dirent_terminate(odp);
+ odp = (dirent64_t *)((intptr_t)odp + reclen);
+
outcount += reclen;
ASSERT3S(outcount, <=, bufsize);
/* Prefetch znode */
if (prefetch)
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
- skip_entry:
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
/* Fill the offset right after advancing the cursor. */
if (next != NULL)
*next = offset;
if (cooks != NULL) {
*cooks++ = offset;
ncooks--;
KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
}
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
/* Subtract unused cookies */
if (ncookies != NULL)
*ncookies -= ncooks;
if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) {
iovp->iov_base += outcount;
iovp->iov_len -= outcount;
zfs_uio_resid(uio) -= outcount;
} else if ((error =
zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) {
/*
* Reset the pointer.
*/
offset = zfs_uio_offset(uio);
}
update:
zap_cursor_fini(&zc);
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1)
kmem_free(outbuf, bufsize);
if (error == ENOENT)
error = 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_uio_setoffset(uio, offset);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (error != 0 && cookies != NULL) {
free(*cookies, M_TEMP);
*cookies = NULL;
*ncookies = 0;
}
return (error);
}
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
*
* IN: vp - vnode of file.
* vap - va_mask identifies requested attributes.
* If AT_XVATTR set, then optional attrs are requested
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
*
* OUT: vap - attribute values.
*
* RETURN: 0 (always succeeds).
*/
static int
zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error = 0;
uint32_t blksize;
u_longlong_t nblocks;
uint64_t mtime[2], ctime[2], crtime[2], rdev;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap = NULL;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[4];
int count = 0;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
if (vp->v_type == VBLK || vp->v_type == VCHR)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
* Also, if we are the owner don't bother, since owner should
* always be allowed to read basic attributes of file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
(vap->va_uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
/*
* Return all attributes. It's cheaper to provide the answer
* than to determine whether we were asked the question.
*/
vap->va_type = IFTOVT(zp->z_mode);
vap->va_mode = zp->z_mode & ~S_IFMT;
vn_fsid(vp, vap);
vap->va_nodeid = zp->z_id;
vap->va_nlink = zp->z_links;
if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp) &&
zp->z_links < ZFS_LINK_MAX)
vap->va_nlink++;
vap->va_size = zp->z_size;
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
vap->va_gen = zp->z_gen;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
/*
* Add in any requested optional attributes and the create time.
* Also set the corresponding bits in the returned attribute bitmap.
*/
if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
xoap->xoa_archive =
((zp->z_pflags & ZFS_ARCHIVE) != 0);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
xoap->xoa_readonly =
((zp->z_pflags & ZFS_READONLY) != 0);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
xoap->xoa_system =
((zp->z_pflags & ZFS_SYSTEM) != 0);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
xoap->xoa_hidden =
((zp->z_pflags & ZFS_HIDDEN) != 0);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
xoap->xoa_nounlink =
((zp->z_pflags & ZFS_NOUNLINK) != 0);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
xoap->xoa_immutable =
((zp->z_pflags & ZFS_IMMUTABLE) != 0);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
xoap->xoa_appendonly =
((zp->z_pflags & ZFS_APPENDONLY) != 0);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
xoap->xoa_nodump =
((zp->z_pflags & ZFS_NODUMP) != 0);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
xoap->xoa_opaque =
((zp->z_pflags & ZFS_OPAQUE) != 0);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
xoap->xoa_av_quarantined =
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
xoap->xoa_av_modified =
((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
vp->v_type == VREG) {
zfs_sa_get_scanstamp(zp, xvap);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
xoap->xoa_generation = zp->z_gen;
XVA_SET_RTN(xvap, XAT_GEN);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
xoap->xoa_offline =
((zp->z_pflags & ZFS_OFFLINE) != 0);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
xoap->xoa_sparse =
((zp->z_pflags & ZFS_SPARSE) != 0);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
xoap->xoa_projinherit =
((zp->z_pflags & ZFS_PROJINHERIT) != 0);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
xoap->xoa_projid = zp->z_projid;
XVA_SET_RTN(xvap, XAT_PROJID);
}
}
ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
ZFS_TIME_DECODE(&vap->va_mtime, mtime);
ZFS_TIME_DECODE(&vap->va_ctime, ctime);
ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
vap->va_blksize = blksize;
vap->va_bytes = nblocks << 9; /* nblocks * 512 */
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
vap->va_blksize = zfsvfs->z_max_blksz;
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If AT_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
* ct - caller context
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - ctime updated, mtime updated if size changed.
*/
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
{
vnode_t *vp = ZTOV(zp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
uint64_t saved_mode;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
sa_bulk_attr_t bulk[7], xattr_bulk[7];
int count = 0, xattr_count = 0;
if (mask == 0)
return (0);
if (mask & AT_NOSET)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (err);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & AT_XVATTR))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (mask & AT_SIZE && vp->v_type == VDIR) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* If this is an xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
xva_init(&tmpxvattr);
/*
* Immutable files can only alter immutable bit and atime
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* Note: ZFS_READONLY is handled in zfs_zaccess_common.
*/
/*
 * Verify that timestamps don't overflow 32 bits.
 * ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times greater than 2039. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (AT_ATIME | AT_MTIME)) {
if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOVERFLOW));
}
}
if (xoap != NULL && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOVERFLOW));
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode)))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
}
attrzp = NULL;
aclp = NULL;
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
* First validate permissions
*/
if (mask & AT_SIZE) {
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
}
if (mask & (AT_ATIME|AT_MTIME) ||
((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr);
}
if (mask & (AT_UID|AT_GID)) {
int idmask = (mask & (AT_UID|AT_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & AT_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & AT_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both AT_UID and AT_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
((idmask == AT_UID) && take_owner) ||
((idmask == AT_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
secpolicy_setid_clear(vap, vp, cr);
trim_mask = (mask & (AT_UID|AT_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & AT_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
 * The bits will be restored prior to actually setting
* the attributes so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(&tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((vp->v_type != VREG &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
if (mask & AT_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(vp, vap,
&oldva, cr);
if (err) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
trim_mask |= AT_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then take ownership
* has been granted or write_acl is present and user
* has the ability to modify mode. In that case remove
 * UID|GID and/or MODE from mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
if (trim_mask & AT_MODE) {
/*
* Save the mode, as secpolicy_vnode_setattr()
* will overwrite it with ova.va_mode.
*/
saved_mode = vap->va_mode;
}
}
err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
if (trim_mask) {
vap->va_mask |= saved_mask;
if (trim_mask & AT_MODE) {
/*
* Recover the mode after
* secpolicy_vnode_setattr().
*/
vap->va_mode = saved_mode;
}
}
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
mask = vap->va_mask;
if ((mask & (AT_UID | AT_GID)) || projid != ZFS_INVALID_PROJID) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
if (err == 0) {
err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
if (err != 0)
vrele(ZTOV(attrzp));
}
if (err)
goto out2;
}
if (mask & AT_UID) {
new_uid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_uid != zp->z_uid &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_uid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & AT_GID) {
new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &fuidp);
if (new_gid != zp->z_gid &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_gid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
tx = dmu_tx_create(os);
if (mask & AT_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = SET_ERROR(EPERM);
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & AT_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
 * For an existing object upgraded from an old system, the
 * on-disk layout has no slot for the project ID attribute.
 * But the quota accounting logic needs to access related slots
 * by offset directly, so we need to adjust the old object's
 * layout to place the project ID at a unified, fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&zp->z_acl_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&attrzp->z_acl_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (AT_UID|AT_GID)) {
if (mask & AT_UID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
zp->z_uid = new_uid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
attrzp->z_uid = new_uid;
}
}
if (mask & AT_GID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
zp->z_gid = new_gid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
attrzp->z_gid = new_gid;
}
}
if (!(mask & AT_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT0(err);
if (attrzp) {
vn_seqc_write_begin(ZTOV(attrzp));
err = zfs_acl_chown_setattr(attrzp);
vn_seqc_write_end(ZTOV(attrzp));
ASSERT0(err);
}
}
if (mask & AT_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if (mask & AT_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&zp->z_atime, sizeof (zp->z_atime));
}
if (mask & AT_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
if (mask & AT_SIZE && !(mask & AT_MTIME)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
} else if (mask != 0) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime);
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
mtime, ctime);
}
}
/*
* Do this after setting timestamps to prevent timestamp
 * update from toggling the bit.
*/
if (xoap && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
xoap->xoa_createtime = vap->va_birthtime;
/*
 * Restore trimmed-off masks
 * so that return masks can be set for the caller.
*/
if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT3S(vp->v_type, ==, VREG);
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&attrzp->z_acl_lock);
}
out:
if (err == 0 && attrzp) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT0(err2);
}
if (attrzp)
vput(ZTOV(attrzp));
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
} else {
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
/*
* Look up the directory entries corresponding to the source and target
* directory/name pairs.
*/
static int
zfs_rename_relock_lookup(znode_t *sdzp, const struct componentname *scnp,
znode_t **szpp, znode_t *tdzp, const struct componentname *tcnp,
znode_t **tzpp)
{
zfsvfs_t *zfsvfs;
znode_t *szp, *tzp;
int error;
/*
* Before using sdzp and tdzp we must ensure that they are live.
* As a porting legacy from illumos we have two things to worry
* about. One is typical for FreeBSD and it is that the vnode is
* not reclaimed (doomed). The other is that the znode is live.
* The current code can invalidate the znode without acquiring the
* corresponding vnode lock if the object represented by the znode
* and vnode is no longer valid after a rollback or receive operation.
- * z_teardown_lock hidden behind ZFS_ENTER and ZFS_EXIT is the lock
+ * z_teardown_lock hidden behind zfs_enter and zfs_exit is the lock
* that protects the znodes from the invalidation.
*/
zfsvfs = sdzp->z_zfsvfs;
ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(sdzp);
- ZFS_VERIFY_ZP(tdzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, sdzp, FTAG)) != 0)
+ return (error);
+ if ((error = zfs_verify_zp(tdzp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
/*
* Re-resolve svp to be certain it still exists and fetch the
* correct vnode.
*/
error = zfs_dirent_lookup(sdzp, scnp->cn_nameptr, &szp, ZEXISTS);
if (error != 0) {
/* Source entry invalid or not there. */
if ((scnp->cn_flags & ISDOTDOT) != 0 ||
(scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
error = SET_ERROR(EINVAL);
goto out;
}
*szpp = szp;
/*
* Re-resolve tvp, if it disappeared we just carry on.
*/
error = zfs_dirent_lookup(tdzp, tcnp->cn_nameptr, &tzp, 0);
if (error != 0) {
vrele(ZTOV(szp));
if ((tcnp->cn_flags & ISDOTDOT) != 0)
error = SET_ERROR(EINVAL);
goto out;
}
*tzpp = tzp;
out:
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* We acquire all but fdvp locks using non-blocking acquisitions. If we
* fail to acquire any lock in the path we will drop all held locks,
* acquire the new lock in a blocking fashion, and then release it and
* restart the rename. This acquire/release step ensures that we do not
* spin on a lock waiting for release. On error release all vnode locks
* and decrement references the way tmpfs_rename() would do.
*/
static int
zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
struct vnode *tdvp, struct vnode **tvpp,
const struct componentname *scnp, const struct componentname *tcnp)
{
struct vnode *nvp, *svp, *tvp;
znode_t *sdzp, *tdzp, *szp, *tzp;
int error;
VOP_UNLOCK1(tdvp);
if (*tvpp != NULL && *tvpp != tdvp)
VOP_UNLOCK1(*tvpp);
relock:
error = vn_lock(sdvp, LK_EXCLUSIVE);
if (error)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
if (error != EBUSY)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE);
if (error)
goto out;
VOP_UNLOCK1(tdvp);
goto relock;
}
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
error = zfs_rename_relock_lookup(sdzp, scnp, &szp, tdzp, tcnp, &tzp);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
goto out;
}
svp = ZTOV(szp);
tvp = tzp != NULL ? ZTOV(tzp) : NULL;
/*
* Now try acquire locks on svp and tvp.
*/
nvp = svp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
if (tvp != NULL)
vrele(tvp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
VOP_UNLOCK1(nvp);
/*
* Concurrent rename race.
* XXX ?
*/
if (nvp == tdvp) {
vrele(nvp);
error = SET_ERROR(EINVAL);
goto out;
}
vrele(*svpp);
*svpp = nvp;
goto relock;
}
vrele(*svpp);
*svpp = nvp;
if (*tvpp != NULL)
vrele(*tvpp);
*tvpp = NULL;
if (tvp != NULL) {
nvp = tvp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK1(*svpp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
vput(nvp);
goto relock;
}
*tvpp = nvp;
}
return (0);
out:
return (error);
}
/*
* Note that we must use VRELE_ASYNC in this function as it walks
* up the directory tree and vrele may need to acquire an exclusive
* lock if a last reference to a vnode is dropped.
*/
static int
zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
{
zfsvfs_t *zfsvfs;
znode_t *zp, *zp1;
uint64_t parent;
int error;
zfsvfs = tdzp->z_zfsvfs;
if (tdzp == szp)
return (SET_ERROR(EINVAL));
if (tdzp == sdzp)
return (0);
if (tdzp->z_id == zfsvfs->z_root)
return (0);
zp = tdzp;
for (;;) {
ASSERT(!zp->z_unlinked);
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
break;
if (parent == szp->z_id) {
error = SET_ERROR(EINVAL);
break;
}
if (parent == zfsvfs->z_root)
break;
if (parent == sdzp->z_id)
break;
error = zfs_zget(zfsvfs, parent, &zp1);
if (error != 0)
break;
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)));
zp = zp1;
}
if (error == ENOTDIR)
panic("checkpath: .. not a directory\n");
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
return (error);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
{
cache_purge(fvp);
if (tvp != NULL)
cache_purge(tvp);
cache_purge_negative(tdvp);
}
#endif
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr);
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdvp - Source directory containing the "old entry".
* scnp - Old entry name.
* tdvp - Target directory to contain the "new entry".
* tcnp - New entry name.
* cr - credentials of caller.
* INOUT: svpp - Source file
* tvpp - Target file, may point to NULL initially
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdvp,tdvp - ctime|mtime updated
*/
static int
zfs_do_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr)
{
int error;
ASSERT_VOP_ELOCKED(tdvp, __func__);
if (*tvpp != NULL)
ASSERT_VOP_ELOCKED(*tvpp, __func__);
/* Reject renames across filesystems. */
if ((*svpp)->v_mount != tdvp->v_mount ||
((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
error = SET_ERROR(EXDEV);
goto out;
}
if (zfsctl_is_node(tdvp)) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Lock all four vnodes to ensure safety and semantics of renaming.
*/
error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
if (error != 0) {
/* no vnodes are locked in the case of error here */
return (error);
}
error = zfs_do_rename_impl(sdvp, svpp, scnp, tdvp, tvpp, tcnp, cr);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(*svpp);
out:
if (*tvpp != NULL)
VOP_UNLOCK1(*tvpp);
if (tdvp != *tvpp)
VOP_UNLOCK1(tdvp);
return (error);
}
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs;
zilog_t *zilog;
znode_t *tdzp, *sdzp, *tzp, *szp;
const char *snm = scnp->cn_nameptr;
const char *tnm = tcnp->cn_nameptr;
int error;
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
zfsvfs = tdzp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(tdzp);
- ZFS_VERIFY_ZP(sdzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
+ return (error);
+ if ((error = zfs_verify_zp(sdzp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
error = SET_ERROR(EILSEQ);
goto out;
}
/* If source and target are the same file, there is nothing to do. */
if ((*svpp) == (*tvpp)) {
error = 0;
goto out;
}
if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
(*tvpp)->v_mountedhere != NULL)) {
error = SET_ERROR(EXDEV);
goto out;
}
szp = VTOZ(*svpp);
- ZFS_VERIFY_ZP(szp);
+ if ((error = zfs_verify_zp(szp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
- if (tzp != NULL)
- ZFS_VERIFY_ZP(tzp);
+ if (tzp != NULL) {
+ if ((error = zfs_verify_zp(tzp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
/*
* This is to prevent the creation of links into attribute space
 * by renaming a linked file into/out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
 * If we are using project inheritance, meaning the directory has
 * ZFS_PROJINHERIT set, then its descendant directories will inherit
 * not only the project ID but also the ZFS_PROJINHERIT flag. In
 * that case, we only allow renames into our tree when the project
 * IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto out;
if ((*svpp)->v_type == VDIR) {
/*
* Avoid ".", "..", and aliases of "." for obvious reasons.
*/
if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
sdzp == szp ||
(scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
error = EINVAL;
goto out;
}
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_check(szp, sdzp, tdzp)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if ((*svpp)->v_type == VDIR) {
if ((*tvpp)->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
} else {
cache_purge(tdvp);
if (sdvp != tdvp)
cache_purge(sdvp);
}
} else {
if ((*tvpp)->v_type == VDIR) {
error = SET_ERROR(EISDIR);
goto out;
}
}
}
vn_seqc_write_begin(*svpp);
vn_seqc_write_begin(sdvp);
if (*tvpp != NULL)
vn_seqc_write_begin(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_begin(tdvp);
vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
if (tzp)
vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
/*
* notify the target directory if it is not the same
* as source directory.
*/
if (tdvp != sdvp) {
vnevent_rename_dest_dir(tdvp, ct);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out_seq;
}
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
if (error == 0) {
error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
snm, tdzp, tnm, szp);
/*
* Update path information for the target vnode
*/
vn_renamepath(tdvp, *svpp, tnm, strlen(tnm));
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY0(zfs_link_destroy(tdzp, tnm, szp, tx,
ZRENAMING, NULL));
}
}
if (error == 0) {
cache_vop_rename(sdvp, *svpp, tdvp, *tvpp, scnp, tcnp);
}
}
dmu_tx_commit(tx);
out_seq:
vn_seqc_write_end(*svpp);
vn_seqc_write_end(sdvp);
if (*tvpp != NULL)
vn_seqc_write_end(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_end(tdvp);
out:
if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
cred_t *cr, int flags)
{
struct componentname scn, tcn;
vnode_t *sdvp, *tdvp;
vnode_t *svp, *tvp;
int error;
svp = tvp = NULL;
sdvp = ZTOV(sdzp);
tdvp = ZTOV(tdzp);
error = zfs_lookup_internal(sdzp, sname, &svp, &scn, DELETE);
if (sdzp->z_zfsvfs->z_replay == B_FALSE)
VOP_UNLOCK1(sdvp);
if (error != 0)
goto fail;
VOP_UNLOCK1(svp);
vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
error = zfs_lookup_internal(tdzp, tname, &tvp, &tcn, RENAME);
if (error == EJUSTRETURN)
tvp = NULL;
else if (error != 0) {
VOP_UNLOCK1(tdvp);
goto fail;
}
error = zfs_do_rename(sdvp, &svp, &scn, tdvp, &tvp, &tcn, cr);
fail:
if (svp != NULL)
vrele(svp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dvp - Directory to contain new symbolic link.
* link - Name for new symlink entry.
* vap - Attributes of new entry.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
int
zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
const char *link, znode_t **zpp, cred_t *cr, int flags)
{
(void) flags;
znode_t *zp;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
ASSERT3S(vap->va_type, ==, VLNK);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (len > MAXPATHLEN) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids,
0 /* projid */)) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create a new object for the symlink.
* for version 4 ZPL datasets the symlink will be an SA attribute
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
__DECONST(void *, link), len, tx);
else
zfs_sa_symlink(zp, __DECONST(char *, link), len, tx);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
*zpp = zp;
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by vp.
*
* IN: vp - vnode of symbolic link.
* uio - structure to contain the link path.
* cr - credentials of caller.
* ct - caller context
*
* OUT: uio - structure containing the link path.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
static int
zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct)
{
(void) cr, (void) ct;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Insert a new entry into directory tdvp referencing svp.
*
* IN: tdvp - Directory to contain new entry.
* svp - vnode of new entry.
* name - name of new entry.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* tdvp - ctime|mtime updated
* svp - ctime updated
*/
int
zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
int flags)
{
(void) flags;
znode_t *tzp;
zfsvfs_t *zfsvfs = tdzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
uint64_t parent;
uid_t owner;
ASSERT3S(ZTOV(tdzp)->v_type, ==, VDIR);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(tdzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (ZTOV(szp)->v_type == VDIR) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
- ZFS_VERIFY_ZP(szp);
+ if ((error = zfs_verify_zp(szp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
/*
 * If we are using project inheritance, meaning the directory has
 * ZFS_PROJINHERIT set, then its descendant directories will inherit
 * not only the project ID but also the ZFS_PROJINHERIT flag. In
 * that case, we only allow hard link creation in our tree when the
 * project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
if (szp->z_pflags & (ZFS_APPENDONLY |
ZFS_IMMUTABLE | ZFS_READONLY)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(ZTOV(szp), cr) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(tdzp, name, &tzp, ZNEW);
if (error) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
error = zfs_link_create(tdzp, name, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
dmu_tx_commit(tx);
if (error == 0) {
vnevent_link(ZTOV(szp), ct);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: ip - inode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* ip - ctime|mtime updated
*/
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
(void) offset;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if (cmd != F_FREESP) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
static void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
(void) cr, (void) ct;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL) {
/*
* The fs has been unmounted, or we did a
* suspend/resume and this file no longer exists.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
if (zp->z_unlinked) {
/*
* Fast path to recycle a vnode of a removed file.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
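/*
* Flush a dirty atime out to the znode's SA before the vnode goes
* inactive, so the update is not lost.
*/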
if (zp->z_atime_dirty && zp->z_unlinked == 0) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&zp->z_atime, sizeof (zp->z_atime), tx);
zp->z_atime_dirty = 0;
dmu_tx_commit(tx);
}
}
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
}
_Static_assert(sizeof (struct zfid_short) <= sizeof (struct fid),
"struct zfid_short bigger than struct fid");
_Static_assert(sizeof (struct zfid_long) <= sizeof (struct fid),
"struct zfid_long bigger than struct fid");
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
(void) ct;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
gen = (uint32_t)gen64;
size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
fidp->fid_len = size;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
if (size == LONG_FID_LEN) {
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
zfid_long_t *zlfid;
zlfid = (zfid_long_t *)fidp;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
/* XXX - this should be the generation number for the objset */
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
caller_context_t *ct)
{
znode_t *zp;
zfsvfs_t *zfsvfs;
+ int error;
switch (cmd) {
case _PC_LINK_MAX:
*valp = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*valp = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*valp = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
#if 0 /* POSIX ACLs are not implemented for ZFS on FreeBSD yet. */
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
*valp = zfsvfs->z_acl_type == ZFSACLTYPE_POSIX ? 1 : 0;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
#else
*valp = 0;
#endif
return (0);
case _PC_ACL_NFS4:
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
*valp = zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4 ? 1 : 0;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
case _PC_ACL_PATH_MAX:
*valp = ACL_MAX_ENTRIES;
return (0);
default:
return (EOPNOTSUPP);
}
}
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
vm_object_t object;
off_t start, end, obj_size;
uint_t blksz;
int pgsin_b, pgsin_a;
int error;
- ZFS_ENTER_ERROR(zfsvfs, zfs_vm_pagerret_error);
- ZFS_VERIFY_ZP_ERROR(zp, zfs_vm_pagerret_error);
+ if (zfs_enter_verify_zp(zfsvfs, zp, FTAG) != 0)
+ return (zfs_vm_pagerret_error);
start = IDX_TO_OFF(ma[0]->pindex);
end = IDX_TO_OFF(ma[count - 1]->pindex + 1);
/*
* Lock a range covering all required and optional pages.
* Note that we need to handle the case of the block size growing.
*/
for (;;) {
blksz = zp->z_blksz;
lr = zfs_rangelock_tryenter(&zp->z_rangelock,
rounddown(start, blksz),
roundup(end, blksz) - rounddown(start, blksz), RL_READER);
if (lr == NULL) {
if (rahead != NULL) {
*rahead = 0;
rahead = NULL;
}
if (rbehind != NULL) {
*rbehind = 0;
rbehind = NULL;
}
break;
}
if (blksz == zp->z_blksz)
break;
zfs_rangelock_exit(lr);
}
object = ma[0]->object;
zfs_vmobject_wlock(object);
obj_size = object->un_pager.vnp.vnp_size;
zfs_vmobject_wunlock(object);
if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
if (lr != NULL)
zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (zfs_vm_pagerret_bad);
}
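/*
* Clamp read-behind/read-ahead to the block-aligned range we locked and,
* for read-ahead, to the size of the backing VM object.
*/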
pgsin_b = 0;
if (rbehind != NULL) {
pgsin_b = OFF_TO_IDX(start - rounddown(start, blksz));
pgsin_b = MIN(*rbehind, pgsin_b);
}
pgsin_a = 0;
if (rahead != NULL) {
pgsin_a = OFF_TO_IDX(roundup(end, blksz) - end);
if (end + IDX_TO_OFF(pgsin_a) >= obj_size)
pgsin_a = OFF_TO_IDX(round_page(obj_size) - end);
pgsin_a = MIN(*rahead, pgsin_a);
}
/*
* NB: we need to pass the exact byte size of the data that we expect
* to read after accounting for the file size. This is required because
* ZFS will panic if we request DMU to read beyond the end of the last
* allocated block.
*/
error = dmu_read_pages(zfsvfs->z_os, zp->z_id, ma, count, &pgsin_b,
&pgsin_a, MIN(end, obj_size) - (end - PAGE_SIZE));
if (lr != NULL)
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, count*PAGE_SIZE);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (error != 0)
return (zfs_vm_pagerret_error);
VM_CNT_INC(v_vnodein);
VM_CNT_ADD(v_vnodepgsin, count + pgsin_b + pgsin_a);
if (rbehind != NULL)
*rbehind = pgsin_b;
if (rahead != NULL)
*rahead = pgsin_a;
return (zfs_vm_pagerret_ok);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int *a_rbehind;
int *a_rahead;
};
#endif
static int
zfs_freebsd_getpages(struct vop_getpages_args *ap)
{
return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
ap->a_rahead));
}
static int
zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
int *rtvals)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
dmu_tx_t *tx;
struct sf_buf *sf;
vm_object_t object;
vm_page_t m;
caddr_t va;
size_t tocopy;
size_t lo_len;
vm_ooffset_t lo_off;
vm_ooffset_t off;
uint_t blksz;
int ncount;
int pcount;
int err;
int i;
object = vp->v_object;
KASSERT(ma[0]->object == object, ("mismatching object"));
KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
pcount = btoc(len);
ncount = pcount;
for (i = 0; i < pcount; i++)
rtvals[i] = zfs_vm_pagerret_error;
- ZFS_ENTER_ERROR(zfsvfs, zfs_vm_pagerret_error);
- ZFS_VERIFY_ZP_ERROR(zp, zfs_vm_pagerret_error);
+ if (zfs_enter_verify_zp(zfsvfs, zp, FTAG) != 0)
+ return (zfs_vm_pagerret_error);
off = IDX_TO_OFF(ma[0]->pindex);
blksz = zp->z_blksz;
lo_off = rounddown(off, blksz);
lo_len = roundup(len + (off - lo_off), blksz);
lr = zfs_rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
zfs_vmobject_wlock(object);
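/*
* If the request extends past the end of the backing object, trim it to
* the object size and mark the pages beyond that point as "bad".
*/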
if (len + off > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > off) {
int pgoff;
len = object->un_pager.vnp.vnp_size - off;
ncount = btoc(len);
if ((pgoff = (int)len & PAGE_MASK) != 0) {
/*
* If the object is locked and the following
* conditions hold, then the page's dirty
* field cannot be concurrently changed by a
* pmap operation.
*/
m = ma[ncount - 1];
vm_page_assert_sbusied(m);
KASSERT(!pmap_page_is_write_mapped(m),
("zfs_putpages: page %p is not read-only",
m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);
}
} else {
len = 0;
ncount = 0;
}
if (ncount < pcount) {
for (i = ncount; i < pcount; i++) {
rtvals[i] = zfs_vm_pagerret_bad;
}
}
}
zfs_vmobject_wunlock(object);
if (ncount == 0)
goto out;
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, zp->z_uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, zp->z_gid) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
goto out;
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, off, len);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
goto out;
}
if (zp->z_blksz < PAGE_SIZE) {
for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
va = zfs_map_page(ma[i], &sf);
dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
zfs_unmap_page(sf);
}
} else {
err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
}
if (err == 0) {
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT0(err);
/*
* XXX we should be passing a callback to undirty
* but that would make the locking messier
*/
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off,
len, 0, NULL, NULL);
zfs_vmobject_wlock(object);
for (i = 0; i < ncount; i++) {
rtvals[i] = zfs_vm_pagerret_ok;
vm_page_undirty(ma[i]);
}
zfs_vmobject_wunlock(object);
VM_CNT_INC(v_vnodeout);
VM_CNT_ADD(v_vnodepgsout, ncount);
}
dmu_tx_commit(tx);
out:
zfs_rangelock_exit(lr);
if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zfsvfs->z_log, zp->z_id);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, len);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (rtvals[0]);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_putpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int a_sync;
int *a_rtvals;
};
#endif
static int
zfs_freebsd_putpages(struct vop_putpages_args *ap)
{
return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
ap->a_rtvals));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_bmap_args {
struct vnode *a_vp;
daddr_t a_bn;
struct bufobj **a_bop;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
};
#endif
static int
zfs_freebsd_bmap(struct vop_bmap_args *ap)
{
if (ap->a_bop != NULL)
*ap->a_bop = &ap->a_vp->v_bufobj;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_open_args {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_open(struct vop_open_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
int error;
error = zfs_open(&vp, ap->a_mode, ap->a_cred);
if (error == 0)
vnode_create_vobject(vp, zp->z_size, ap->a_td);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_close_args {
struct vnode *a_vp;
int a_fflag;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_close(struct vop_close_args *ap)
{
return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_ioctl_args {
struct vnode *a_vp;
ulong_t a_command;
caddr_t a_data;
int a_fflag;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_ioctl(struct vop_ioctl_args *ap)
{
return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
ap->a_fflag, ap->a_cred, NULL));
}
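/*
* Translate FreeBSD kernel I/O flags (IO_APPEND, IO_NDELAY, IO_SYNC) into
* the POSIX open flags expected by the common ZFS read/write code.
*/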
static int
ioflags(int ioflags)
{
int flags = 0;
if (ioflags & IO_APPEND)
flags |= O_APPEND;
if (ioflags & IO_NDELAY)
flags |= O_NONBLOCK;
if (ioflags & IO_SYNC)
flags |= O_SYNC;
return (flags);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_read_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_read(struct vop_read_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_write_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_write(struct vop_write_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#if __FreeBSD_version >= 1300102
/*
* VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
* the comment above cache_fplookup for details.
*/
static int
zfs_freebsd_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
vnode_t *vp;
znode_t *zp;
uint64_t pflags;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL))
return (EAGAIN);
pflags = atomic_load_64(&zp->z_pflags);
if (pflags & ZFS_AV_QUARANTINED)
return (EAGAIN);
if (pflags & ZFS_XATTR)
return (EAGAIN);
if ((pflags & ZFS_NO_EXECS_DENIED) == 0)
return (EAGAIN);
return (0);
}
#endif
#if __FreeBSD_version >= 1300139
static int
zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
vnode_t *vp;
znode_t *zp;
char *target;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL)) {
return (EAGAIN);
}
target = atomic_load_consume_ptr(&zp->z_cached_symlink);
if (target == NULL) {
return (EAGAIN);
}
return (cache_symlink_resolve(v->a_fpl, target, strlen(target)));
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_access_args {
struct vnode *a_vp;
accmode_t a_accmode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_access(struct vop_access_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
accmode_t accmode;
int error = 0;
if (ap->a_accmode == VEXEC) {
if (zfs_fastaccesschk_execute(zp, ap->a_cred) == 0)
return (0);
}
/*
* ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
*/
accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0)
error = zfs_access(zp, accmode, 0, ap->a_cred);
/*
* VADMIN has to be handled by vaccess().
*/
if (error == 0) {
accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0) {
#if __FreeBSD_version >= 1300105
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred);
#else
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred, NULL);
#endif
}
}
/*
* For VEXEC, ensure that at least one execute bit is set for
* non-directories.
*/
if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
(zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
error = EACCES;
}
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_lookup(struct vop_lookup_args *ap, boolean_t cached)
{
struct componentname *cnp = ap->a_cnp;
char nm[NAME_MAX + 1];
ASSERT3U(cnp->cn_namelen, <, sizeof (nm));
strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof (nm)));
return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
cnp->cn_cred, 0, cached));
}
static int
zfs_freebsd_cachedlookup(struct vop_cachedlookup_args *ap)
{
return (zfs_freebsd_lookup((struct vop_lookup_args *)ap, B_TRUE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
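/*
* Lookup entry point: use the VFS name cache when the dataset has
* z_use_namecache set, otherwise fall back to an uncached zfs_lookup().
*/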
static int
zfs_cache_lookup(struct vop_lookup_args *ap)
{
zfsvfs_t *zfsvfs;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
if (zfsvfs->z_use_namecache)
return (vfs_cache_lookup(ap));
else
return (zfs_freebsd_lookup(ap, B_FALSE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_create_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_create(struct vop_create_args *ap)
{
zfsvfs_t *zfsvfs;
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc, mode;
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
vattr_init_mask(vap);
mode = vap->va_mode & ALLPERMS;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
*ap->a_vpp = NULL;
rc = zfs_create(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap, 0, mode,
&zp, cnp->cn_cred, 0 /* flag */, NULL /* vsecattr */);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
if (zfsvfs->z_use_namecache &&
rc == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_remove_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_remove(struct vop_remove_args *ap)
{
#if __FreeBSD_version < 1400068
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
#endif
return (zfs_remove_(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
ap->a_cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_mkdir_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_mkdir(struct vop_mkdir_args *ap)
{
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc;
#if __FreeBSD_version < 1400068
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
#endif
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_mkdir(VTOZ(ap->a_dvp), ap->a_cnp->cn_nameptr, vap, &zp,
ap->a_cnp->cn_cred, 0, NULL);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rmdir_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_rmdir(struct vop_rmdir_args *ap)
{
struct componentname *cnp = ap->a_cnp;
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
return (zfs_rmdir_(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readdir_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
int *a_ncookies;
cookie_t **a_cookies;
};
#endif
static int
zfs_freebsd_readdir(struct vop_readdir_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fsync_args {
struct vnode *a_vp;
int a_waitfor;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
vop_stdfsync(ap);
return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_getattr(struct vop_getattr_args *ap)
{
vattr_t *vap = ap->a_vap;
xvattr_t xvap;
ulong_t fflags = 0;
int error;
xva_init(&xvap);
xvap.xva_vattr = *vap;
xvap.xva_vattr.va_mask |= AT_XVATTR;
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&xvap, XAT_APPENDONLY);
XVA_SET_REQ(&xvap, XAT_NOUNLINK);
XVA_SET_REQ(&xvap, XAT_NODUMP);
XVA_SET_REQ(&xvap, XAT_READONLY);
XVA_SET_REQ(&xvap, XAT_ARCHIVE);
XVA_SET_REQ(&xvap, XAT_SYSTEM);
XVA_SET_REQ(&xvap, XAT_HIDDEN);
XVA_SET_REQ(&xvap, XAT_REPARSE);
XVA_SET_REQ(&xvap, XAT_OFFLINE);
XVA_SET_REQ(&xvap, XAT_SPARSE);
error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred);
if (error != 0)
return (error);
/* Convert ZFS xattr into chflags. */
#define FLAG_CHECK(fflag, xflag, xfield) do { \
if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
fflags |= (fflag); \
} while (0)
FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHECK(UF_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHECK
*vap = xvap.xva_vattr;
vap->va_flags = fflags;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_setattr(struct vop_setattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
cred_t *cred = ap->a_cred;
xvattr_t xvap;
ulong_t fflags;
uint64_t zflags;
vattr_init_mask(vap);
vap->va_mask &= ~AT_NOSET;
xva_init(&xvap);
xvap.xva_vattr = *vap;
zflags = VTOZ(vp)->z_pflags;
if (vap->va_flags != VNOVAL) {
zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
int error;
if (zfsvfs->z_use_fuids == B_FALSE)
return (EOPNOTSUPP);
fflags = vap->va_flags;
/*
* XXX KDM
* We need to figure out whether it makes sense to allow
* UF_REPARSE through, since we don't really have other
* facilities to handle reparse points and zfs_setattr()
* doesn't currently allow setting that attribute anyway.
*/
if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
UF_OFFLINE|UF_SPARSE)) != 0)
return (EOPNOTSUPP);
/*
* Unprivileged processes are not permitted to unset system
* flags, or modify flags if any system flags are set.
* Privileged non-jail processes may not modify system flags
* if securelevel > 0 and any existing system flags are set.
* Privileged jail processes behave like privileged non-jail
* processes if the PR_ALLOW_CHFLAGS permission bit is set;
* otherwise, they behave like unprivileged processes.
*/
if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
spl_priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
error = securelevel_gt(cred, 0);
if (error != 0)
return (error);
}
} else {
/*
* Callers may only modify the file flags on
* objects they have VADMIN rights for.
*/
if ((error = VOP_ACCESS(vp, VADMIN, cred,
curthread)) != 0)
return (error);
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY |
ZFS_NOUNLINK)) {
return (EPERM);
}
if (fflags &
(SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
return (EPERM);
}
}
#define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
if (((fflags & (fflag)) && !(zflags & (zflag))) || \
((zflags & (zflag)) && !(fflags & (fflag)))) { \
XVA_SET_REQ(&xvap, (xflag)); \
(xfield) = ((fflags & (fflag)) != 0); \
} \
} while (0)
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHANGE
}
if (vap->va_birthtime.tv_sec != VNOVAL) {
xvap.xva_vattr.va_mask |= AT_XVATTR;
XVA_SET_REQ(&xvap, XAT_CREATETIME);
}
return (zfs_setattr(VTOZ(vp), (vattr_t *)&xvap, 0, cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rename_args {
struct vnode *a_fdvp;
struct vnode *a_fvp;
struct componentname *a_fcnp;
struct vnode *a_tdvp;
struct vnode *a_tvp;
struct componentname *a_tcnp;
};
#endif
static int
zfs_freebsd_rename(struct vop_rename_args *ap)
{
vnode_t *fdvp = ap->a_fdvp;
vnode_t *fvp = ap->a_fvp;
vnode_t *tdvp = ap->a_tdvp;
vnode_t *tvp = ap->a_tvp;
int error;
#if __FreeBSD_version < 1400068
ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
#endif
error = zfs_do_rename(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
ap->a_tcnp, ap->a_fcnp->cn_cred);
vrele(fdvp);
vrele(fvp);
vrele(tdvp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_symlink_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
char *a_target;
};
#endif
static int
zfs_freebsd_symlink(struct vop_symlink_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
#if __FreeBSD_version >= 1300139
char *symlink;
size_t symlink_len;
#endif
int rc;
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_symlink(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap,
ap->a_target, &zp, cnp->cn_cred, 0 /* flags */);
if (rc == 0) {
*ap->a_vpp = ZTOV(zp);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
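/*
* Seed z_cached_symlink so that lockless lookup (VOP_FPLOOKUP_SYMLINK)
* can resolve the link without calling zfs_readlink().
*/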
#if __FreeBSD_version >= 1300139
MPASS(zp->z_cached_symlink == NULL);
symlink_len = strlen(ap->a_target);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, ap->a_target, symlink_len);
symlink[symlink_len] = '\0';
atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)symlink);
}
#endif
}
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readlink_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_readlink(struct vop_readlink_args *ap)
{
zfs_uio_t uio;
int error;
#if __FreeBSD_version >= 1300139
znode_t *zp = VTOZ(ap->a_vp);
char *symlink, *base;
size_t symlink_len;
bool trycache;
#endif
zfs_uio_init(&uio, ap->a_uio);
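/*
* If the caller passed a single kernel-space iovec, remember where the
* link text will land so the result can be copied into z_cached_symlink
* for use by lockless lookup.
*/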
#if __FreeBSD_version >= 1300139
trycache = false;
if (zfs_uio_segflg(&uio) == UIO_SYSSPACE &&
zfs_uio_iovcnt(&uio) == 1) {
base = zfs_uio_iovbase(&uio, 0);
symlink_len = zfs_uio_iovlen(&uio, 0);
trycache = true;
}
#endif
error = zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL);
#if __FreeBSD_version >= 1300139
if (atomic_load_ptr(&zp->z_cached_symlink) != NULL ||
error != 0 || !trycache) {
return (error);
}
symlink_len -= zfs_uio_resid(&uio);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, base, symlink_len);
symlink[symlink_len] = '\0';
if (!atomic_cmpset_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)NULL, (uintptr_t)symlink)) {
cache_symlink_free(symlink, symlink_len + 1);
}
}
#endif
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_link_args {
struct vnode *a_tdvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_link(struct vop_link_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *vp = ap->a_vp;
vnode_t *tdvp = ap->a_tdvp;
if (tdvp->v_mount != vp->v_mount)
return (EXDEV);
#if __FreeBSD_version < 1400068
ASSERT(cnp->cn_flags & SAVENAME);
#endif
return (zfs_link(VTOZ(tdvp), VTOZ(vp),
cnp->cn_nameptr, cnp->cn_cred, 0));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
#else
zfs_inactive(vp, ap->a_td->td_ucred, NULL);
#endif
return (0);
}
#if __FreeBSD_version >= 1300042
#ifndef _SYS_SYSPROTO_H_
struct vop_need_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_need_inactive(struct vop_need_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int need;
if (vn_need_pageq_flush(vp))
return (1);
if (!ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs))
return (1);
need = (zp->z_sa_hdl == NULL || zp->z_unlinked || zp->z_atime_dirty);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
return (need);
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT3P(zp, !=, NULL);
#if __FreeBSD_version < 1300042
/* Destroy the vm object and flush associated pages. */
vnode_destroy_vobject(vp);
#endif
/*
* z_teardown_inactive_lock protects from a race with
* zfs_znode_dmu_fini in zfsvfs_teardown during
* force unmount.
*/
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL)
zfs_znode_free(zp);
else
zfs_zinactive(zp);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vp->v_data = NULL;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfs_freebsd_fid(struct vop_fid_args *ap)
{
return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_pathconf_args {
struct vnode *a_vp;
int a_name;
register_t *a_retval;
} *ap;
#endif
static int
zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
{
ulong_t val;
int error;
error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
curthread->td_ucred, NULL);
if (error == 0) {
*ap->a_retval = val;
return (error);
}
if (error != EOPNOTSUPP)
return (error);
switch (ap->a_name) {
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
#if __FreeBSD_version >= 1400032
case _PC_DEALLOC_PRESENT:
*ap->a_retval = 1;
return (0);
#endif
case _PC_PIPE_BUF:
if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) {
*ap->a_retval = PIPE_BUF;
return (0);
}
return (EINVAL);
default:
return (vop_stdpathconf(ap));
}
}
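/*
* Tunable (zfs_xattr_compat) selecting which of the user-namespace xattr
* naming schemes described below is used when writing new attributes.
*/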
static int zfs_xattr_compat = 1;
static int
zfs_check_attrname(const char *name)
{
/* We don't allow the '/' character in attribute names. */
if (strchr(name, '/') != NULL)
return (SET_ERROR(EINVAL));
/* We don't allow attribute names that start with a namespace prefix. */
if (ZFS_XA_NS_PREFIX_FORBIDDEN(name))
return (SET_ERROR(EINVAL));
return (0);
}
/*
* FreeBSD's extended attribute namespaces map to the following file name
* prefixes for ZFS extended attribute names:
*
* NAMESPACE XATTR_COMPAT PREFIX
* system * freebsd:system:
* user 1 (none, can be used to access ZFS
* fsattr(5) attributes created on Solaris)
* user 0 user.
*/
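/*
* For example, with the defaults a user-namespace attribute "foo" is
* stored under the on-disk name "foo" in compat mode and "user.foo"
* otherwise, while a system-namespace attribute "md5" always becomes
* "freebsd:system:md5".
*/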
static int
zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
size_t size, boolean_t compat)
{
const char *namespace, *prefix, *suffix;
memset(attrname, 0, size);
switch (attrnamespace) {
case EXTATTR_NAMESPACE_USER:
if (compat) {
/*
* This is the default namespace by which we can access
* all attributes created on Solaris.
*/
prefix = namespace = suffix = "";
} else {
/*
* This is compatible with the user namespace encoding
* on Linux prior to xattr_compat, but nothing
* else.
*/
prefix = "";
namespace = "user";
suffix = ".";
}
break;
case EXTATTR_NAMESPACE_SYSTEM:
prefix = "freebsd:";
namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
suffix = ":";
break;
case EXTATTR_NAMESPACE_EMPTY:
default:
return (SET_ERROR(EINVAL));
}
if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
name) >= size) {
return (SET_ERROR(ENAMETOOLONG));
}
return (0);
}
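/*
* Make sure the SA xattr nvlist (z_xattr_cached) is loaded.  The caller
* holds z_xattr_lock at least as reader; if the lock has to be upgraded
* to populate the cache, it is downgraded again before returning.
*/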
static int
zfs_ensure_xattr_cached(znode_t *zp)
{
int error = 0;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zp->z_xattr_cached != NULL)
return (0);
if (rw_write_held(&zp->z_xattr_lock))
return (zfs_sa_get_xattr(zp));
if (!rw_tryupgrade(&zp->z_xattr_lock)) {
rw_exit(&zp->z_xattr_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
}
if (zp->z_xattr_cached == NULL)
error = zfs_sa_get_xattr(zp);
rw_downgrade(&zp->z_xattr_lock);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
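/*
* "Directory" flavor of getextattr: the attribute is a regular file in
* the hidden xattr directory, so open and read it with ordinary vnode
* operations.
*/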
static int
zfs_getextattr_dir(struct vop_getextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
flags = FREAD;
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
#endif
error = vn_open_cred(&nd, &flags, 0, VN_OPEN_INVFS, ap->a_cred, NULL);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
NDFREE_PNBUF(&nd);
if (ap->a_size != NULL) {
error = VOP_GETATTR(vp, &va, ap->a_cred);
if (error == 0)
*ap->a_size = (size_t)va.va_size;
} else if (ap->a_uio != NULL)
error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
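/*
* "SA" flavor of getextattr: the attribute is a byte array stored in the
* znode's cached system-attribute nvlist.
*/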
static int
zfs_getextattr_sa(struct vop_getextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
uchar_t *nv_value;
uint_t nv_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
error = nvlist_lookup_byte_array(zp->z_xattr_cached, attrname,
&nv_value, &nv_size);
if (error != 0)
return (SET_ERROR(error));
if (ap->a_size != NULL)
*ap->a_size = nv_size;
else if (ap->a_uio != NULL)
error = uiomove(nv_value, nv_size, ap->a_uio);
if (error != 0)
return (SET_ERROR(error));
return (0);
}
static int
zfs_getextattr_impl(struct vop_getextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname), compat);
if (error != 0)
return (error);
error = ENOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_getextattr_sa(ap, attrname);
if (error == ENOENT)
error = zfs_getextattr_dir(ap, attrname);
return (error);
}
/*
* Vnode operation to retrieve a named extended attribute.
*/
static int
zfs_getextattr(struct vop_getextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (SET_ERROR(error));
error = zfs_check_attrname(ap->a_name);
if (error != 0)
return (error);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
error = ENOENT;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
error = zfs_getextattr_impl(ap, zfs_xattr_compat);
if ((error == ENOENT || error == ENOATTR) &&
ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/*
* Fall back to the alternate namespace format if we failed to
* find a user xattr.
*/
error = zfs_getextattr_impl(ap, !zfs_xattr_compat);
}
rw_exit(&zp->z_xattr_lock);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_deleteextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_deleteextattr_dir(struct vop_deleteextattr_args *ap, const char *attrname)
{
struct nameidata nd;
vnode_t *xvp = NULL, *vp;
int error;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp, ap->a_td);
#else
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp);
#endif
error = namei(&nd);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
NDFREE_PNBUF(&nd);
vput(nd.ni_dvp);
if (vp == nd.ni_dvp)
vrele(vp);
else
vput(vp);
return (error);
}
static int
zfs_deleteextattr_sa(struct vop_deleteextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
error = nvlist_remove(nvl, attrname, DATA_TYPE_BYTE_ARRAY);
if (error != 0)
error = SET_ERROR(error);
else
error = zfs_sa_set_xattr(zp, attrname, NULL, 0);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
static int
zfs_deleteextattr_impl(struct vop_deleteextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname), compat);
if (error != 0)
return (error);
error = ENOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_deleteextattr_sa(ap, attrname);
if (error == ENOENT)
error = zfs_deleteextattr_dir(ap, attrname);
return (error);
}
/*
* Vnode operation to remove a named attribute.
*/
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (SET_ERROR(error));
error = zfs_check_attrname(ap->a_name);
if (error != 0)
return (error);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
error = zfs_deleteextattr_impl(ap, zfs_xattr_compat);
if ((error == ENOENT || error == ENOATTR) &&
ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/*
* Fall back to the alternate namespace format if we failed to
* find a user xattr.
*/
error = zfs_deleteextattr_impl(ap, !zfs_xattr_compat);
}
rw_exit(&zp->z_xattr_lock);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_setextattr_dir(struct vop_setextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR | CREATE_XATTR_DIR, B_FALSE);
if (error != 0)
return (error);
flags = FFLAGS(O_WRONLY | O_CREAT);
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
#endif
error = vn_open_cred(&nd, &flags, 0600, VN_OPEN_INVFS, ap->a_cred,
NULL);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
NDFREE_PNBUF(&nd);
VATTR_NULL(&va);
va.va_size = 0;
error = VOP_SETATTR(vp, &va, ap->a_cred);
if (error == 0)
VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_setextattr_sa(struct vop_setextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
size_t sa_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
size_t entry_size = ap->a_uio->uio_resid;
if (entry_size > DXATTR_MAX_ENTRY_SIZE)
return (SET_ERROR(EFBIG));
error = nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error != 0)
return (SET_ERROR(error));
if (sa_size > DXATTR_MAX_SA_SIZE)
return (SET_ERROR(EFBIG));
uchar_t *buf = kmem_alloc(entry_size, KM_SLEEP);
error = uiomove(buf, entry_size, ap->a_uio);
if (error != 0) {
error = SET_ERROR(error);
} else {
error = nvlist_add_byte_array(nvl, attrname, buf, entry_size);
if (error != 0)
error = SET_ERROR(error);
}
if (error == 0)
error = zfs_sa_set_xattr(zp, attrname, buf, entry_size);
kmem_free(buf, entry_size);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
static int
zfs_setextattr_impl(struct vop_setextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname), compat);
if (error != 0)
return (error);
struct vop_deleteextattr_args vda = {
.a_vp = ap->a_vp,
.a_attrnamespace = ap->a_attrnamespace,
.a_name = ap->a_name,
.a_cred = ap->a_cred,
.a_td = ap->a_td,
};
error = ENOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa && zfsvfs->z_xattr_sa) {
error = zfs_setextattr_sa(ap, attrname);
if (error == 0) {
/*
* Successfully stored in the SA; clear any copy in the
* xattr directory, if present.
*/
zfs_deleteextattr_dir(&vda, attrname);
}
}
if (error != 0) {
error = zfs_setextattr_dir(ap, attrname);
if (error == 0 && zp->z_is_sa) {
/*
* Successfully stored in the xattr directory; clear any
* copy in the SA, if present.
*/
zfs_deleteextattr_sa(&vda, attrname);
}
}
if (error == 0 && ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/*
* Also clear all versions of the alternate compat name.
*/
zfs_deleteextattr_impl(&vda, !compat);
}
return (error);
}
/*
* Vnode operation to set a named attribute.
*/
static int
zfs_setextattr(struct vop_setextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (SET_ERROR(error));
error = zfs_check_attrname(ap->a_name);
if (error != 0)
return (error);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
error = zfs_setextattr_impl(ap, zfs_xattr_compat);
rw_exit(&zp->z_xattr_lock);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_listextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_listextattr_dir(struct vop_listextattr_args *ap, const char *attrprefix)
{
struct thread *td = ap->a_td;
struct nameidata nd;
uint8_t dirbuf[sizeof (struct dirent)];
struct iovec aiov;
struct uio auio;
vnode_t *xvp = NULL, *vp;
int error, eof;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0) {
/*
* ENOATTR means that the EA directory does not yet exist,
* i.e. there are no extended attributes there.
*/
if (error == ENOATTR)
error = 0;
return (error);
}
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp);
#endif
error = namei(&nd);
if (error != 0)
return (SET_ERROR(error));
vp = nd.ni_vp;
NDFREE_PNBUF(&nd);
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
auio.uio_rw = UIO_READ;
auio.uio_offset = 0;
size_t plen = strlen(attrprefix);
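/*
* Walk the hidden xattr directory with VOP_READDIR() and report every
* regular entry whose name starts with the requested namespace prefix,
* stripping that prefix from the name returned to the caller.
*/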
do {
aiov.iov_base = (void *)dirbuf;
aiov.iov_len = sizeof (dirbuf);
auio.uio_resid = sizeof (dirbuf);
error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
if (error != 0)
break;
int done = sizeof (dirbuf) - auio.uio_resid;
for (int pos = 0; pos < done; ) {
struct dirent *dp = (struct dirent *)(dirbuf + pos);
pos += dp->d_reclen;
/*
* XXX: Temporarily we also accept DT_UNKNOWN, as this
* is what we get when the attribute was created on Solaris.
*/
if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
continue;
else if (plen == 0 &&
ZFS_XA_NS_PREFIX_FORBIDDEN(dp->d_name))
continue;
else if (strncmp(dp->d_name, attrprefix, plen) != 0)
continue;
uint8_t nlen = dp->d_namlen - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* Format of extattr name entry is one byte for
* length and the rest for name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = dp->d_name + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0) {
error = SET_ERROR(error);
break;
}
}
}
} while (!eof && error == 0);
vput(vp);
return (error);
}
static int
zfs_listextattr_sa(struct vop_listextattr_args *ap, const char *attrprefix)
{
znode_t *zp = VTOZ(ap->a_vp);
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
size_t plen = strlen(attrprefix);
nvpair_t *nvp = NULL;
while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
const char *name = nvpair_name(nvp);
if (plen == 0 && ZFS_XA_NS_PREFIX_FORBIDDEN(name))
continue;
else if (strncmp(name, attrprefix, plen) != 0)
continue;
uint8_t nlen = strlen(name) - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* Format of extattr name entry is one byte for
* length and the rest for name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = __DECONST(char *, name) + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0) {
error = SET_ERROR(error);
break;
}
}
}
return (error);
}
static int
zfs_listextattr_impl(struct vop_listextattr_args *ap, boolean_t compat)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrprefix[16];
int error;
error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
sizeof (attrprefix), compat);
if (error != 0)
return (error);
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_listextattr_sa(ap, attrprefix);
if (error == 0)
error = zfs_listextattr_dir(ap, attrprefix);
return (error);
}
/*
* Vnode operation to retrieve extended attributes on a vnode.
*/
static int
zfs_listextattr(struct vop_listextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
if (ap->a_size != NULL)
*ap->a_size = 0;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (SET_ERROR(error));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
rw_enter(&zp->z_xattr_lock, RW_READER);
error = zfs_listextattr_impl(ap, zfs_xattr_compat);
if (error == 0 && ap->a_attrnamespace == EXTATTR_NAMESPACE_USER) {
/* Also list user xattrs with the alternate format. */
error = zfs_listextattr_impl(ap, !zfs_xattr_compat);
}
rw_exit(&zp->z_xattr_lock);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_getacl(struct vop_getacl_args *ap)
{
int error;
vsecattr_t vsecattr;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
&vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
vsecattr.vsa_aclcnt);
if (vsecattr.vsa_aclentp != NULL)
kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_setacl(struct vop_setacl_args *ap)
{
int error;
vsecattr_t vsecattr;
int aclbsize; /* size of acl list in bytes */
aclent_t *aaclp;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
if (ap->a_aclp == NULL)
return (EINVAL);
if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
return (EINVAL);
/*
* With NFSv4 ACLs, chmod(2) may need to add additional entries,
* splitting every entry into two and appending "canonical six"
* entries at the end. Don't allow for setting an ACL that would
* cause chmod(2) to run out of ACL entries.
*/
if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
return (ENOSPC);
error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
if (error != 0)
return (error);
vsecattr.vsa_mask = VSA_ACE;
aclbsize = ap->a_aclp->acl_cnt * sizeof (ace_t);
vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
aaclp = vsecattr.vsa_aclentp;
vsecattr.vsa_aclentsz = aclbsize;
aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
error = zfs_setsecattr(VTOZ(ap->a_vp), &vsecattr, 0, ap->a_cred);
kmem_free(aaclp, aclbsize);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_aclcheck_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_aclcheck(struct vop_aclcheck_args *ap)
{
return (EOPNOTSUPP);
}
static int
zfs_vptocnp(struct vop_vptocnp_args *ap)
{
vnode_t *covered_vp;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
znode_t *zp = VTOZ(vp);
int ltype;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
/*
* If we are a snapshot mounted under .zfs, run the operation
* on the covered vnode.
*/
if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
char name[MAXNAMLEN + 1];
znode_t *dzp;
size_t len;
error = zfs_znode_parent_and_name(zp, &dzp, name);
if (error == 0) {
len = strlen(name);
if (*ap->a_buflen < len)
error = SET_ERROR(ENOMEM);
}
if (error == 0) {
*ap->a_buflen -= len;
memcpy(ap->a_buf + *ap->a_buflen, name, len);
*ap->a_vpp = ZTOV(dzp);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
covered_vp = vp->v_mount->mnt_vnodecovered;
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(covered_vp);
#else
vhold(covered_vp);
#endif
ltype = VOP_ISLOCKED(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300045
error = vget_finish(covered_vp, LK_SHARED, vs);
#else
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
#if __FreeBSD_version >= 1300123
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
ap->a_buflen);
#else
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
ap->a_buf, ap->a_buflen);
#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
if (VN_IS_DOOMED(vp))
error = SET_ERROR(ENOENT);
return (error);
}
#if __FreeBSD_version >= 1400032
static int
zfs_deallocate(struct vop_deallocate_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog;
off_t off, len, file_sz;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
zilog = zfsvfs->z_log;
off = *ap->a_offset;
len = *ap->a_len;
file_sz = zp->z_size;
if (off + len > file_sz)
len = file_sz - off;
/* Fast path for out-of-range request. */
if (len <= 0) {
*ap->a_len = 0;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
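/*
* Punch the hole; for synchronous datasets or IO_SYNC requests also
* commit the log so the deallocation is stable on disk.
*/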
error = zfs_freesp(zp, off, len, O_RDWR, TRUE);
if (error == 0) {
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS ||
(ap->a_ioflag & IO_SYNC) != 0)
zil_commit(zilog, zp->z_id);
*ap->a_offset = off + len;
*ap->a_len = 0;
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
#endif
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;
struct vop_vector zfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_inactive = zfs_freebsd_inactive,
#if __FreeBSD_version >= 1300042
.vop_need_inactive = zfs_freebsd_need_inactive,
#endif
.vop_reclaim = zfs_freebsd_reclaim,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_allocate = VOP_EINVAL,
#if __FreeBSD_version >= 1400032
.vop_deallocate = zfs_deallocate,
#endif
.vop_lookup = zfs_cache_lookup,
.vop_cachedlookup = zfs_freebsd_cachedlookup,
.vop_getattr = zfs_freebsd_getattr,
.vop_setattr = zfs_freebsd_setattr,
.vop_create = zfs_freebsd_create,
.vop_mknod = (vop_mknod_t *)zfs_freebsd_create,
.vop_mkdir = zfs_freebsd_mkdir,
.vop_readdir = zfs_freebsd_readdir,
.vop_fsync = zfs_freebsd_fsync,
.vop_open = zfs_freebsd_open,
.vop_close = zfs_freebsd_close,
.vop_rmdir = zfs_freebsd_rmdir,
.vop_ioctl = zfs_freebsd_ioctl,
.vop_link = zfs_freebsd_link,
.vop_symlink = zfs_freebsd_symlink,
.vop_readlink = zfs_freebsd_readlink,
.vop_read = zfs_freebsd_read,
.vop_write = zfs_freebsd_write,
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = zfs_freebsd_bmap,
.vop_fid = zfs_freebsd_fid,
.vop_getextattr = zfs_getextattr,
.vop_deleteextattr = zfs_deleteextattr,
.vop_setextattr = zfs_setextattr,
.vop_listextattr = zfs_listextattr,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
.vop_getpages = zfs_freebsd_getpages,
.vop_putpages = zfs_freebsd_putpages,
.vop_vptocnp = zfs_vptocnp,
#if __FreeBSD_version >= 1300064
.vop_lock1 = vop_lock,
.vop_unlock = vop_unlock,
.vop_islocked = vop_islocked,
#endif
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_vnodeops);
struct vop_vector zfs_fifoops = {
.vop_default = &fifo_specops,
.vop_fsync = zfs_freebsd_fsync,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_getattr = zfs_freebsd_getattr,
.vop_inactive = zfs_freebsd_inactive,
.vop_read = VOP_PANIC,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_setattr = zfs_freebsd_setattr,
.vop_write = VOP_PANIC,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_fid = zfs_freebsd_fid,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_fifoops);
/*
* special share hidden files vnode operations template
*/
struct vop_vector zfs_shareops = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_access = zfs_freebsd_access,
.vop_inactive = zfs_freebsd_inactive,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_fid = zfs_freebsd_fid,
.vop_pathconf = zfs_freebsd_pathconf,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_shareops);
ZFS_MODULE_PARAM(zfs, zfs_, xattr_compat, INT, ZMOD_RW,
"Use legacy ZFS xattr naming for writing new user namespace xattrs");
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index ac030f75323e..8d2a6d77624b 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -1,1555 +1,1619 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Portions Copyright 2010 Robert Milkowski
*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
/*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/zvol/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot. No user command needs to be
* run before opening and using a device.
*
* On FreeBSD ZVOLs are simply GEOM providers like any other storage device
* in the system. Except when they're simply character devices (volmode=dev).
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/zil_impl.h>
#include <sys/filio.h>
+#include <sys/freebsd_event.h>
#include <geom/geom.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include "zfs_namecheck.h"
#define ZVOL_DUMPSIZE "dumpsize"
#ifdef ZVOL_LOCK_DEBUG
#define ZVOL_RW_READER RW_WRITER
#define ZVOL_RW_READ_HELD RW_WRITE_HELD
#else
#define ZVOL_RW_READER RW_READER
#define ZVOL_RW_READ_HELD RW_READ_HELD
#endif
enum zvol_geom_state {
ZVOL_GEOM_UNINIT,
ZVOL_GEOM_STOPPED,
ZVOL_GEOM_RUNNING,
};
struct zvol_state_os {
#define zso_dev _zso_state._zso_dev
#define zso_geom _zso_state._zso_geom
union {
/* volmode=dev */
struct zvol_state_dev {
struct cdev *zsd_cdev;
uint64_t zsd_sync_cnt;
+ struct selinfo zsd_selinfo;
} _zso_dev;
/* volmode=geom */
struct zvol_state_geom {
struct g_provider *zsg_provider;
struct bio_queue_head zsg_queue;
struct mtx zsg_queue_mtx;
enum zvol_geom_state zsg_state;
} _zso_geom;
} _zso_state;
int zso_dying;
};
static uint32_t zvol_minors;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &zvol_volmode, 0,
"Expose as GEOM providers (1), device files (2) or neither");
static boolean_t zpool_on_zvol = B_FALSE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0,
"Allow zpools to use zvols as vdevs (DANGEROUS)");
/*
* Toggle unmap functionality.
*/
boolean_t zvol_unmap_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
&zvol_unmap_enabled, 0, "Enable UNMAP functionality");
/*
* zvol maximum transfer in one DMU tx.
*/
int zvol_maxphys = DMU_MAX_ACCESS / 2;
static void zvol_ensure_zilog(zvol_state_t *zv);
static d_open_t zvol_cdev_open;
static d_close_t zvol_cdev_close;
static d_ioctl_t zvol_cdev_ioctl;
static d_read_t zvol_cdev_read;
static d_write_t zvol_cdev_write;
static d_strategy_t zvol_geom_bio_strategy;
+static d_kqfilter_t zvol_cdev_kqfilter;
static struct cdevsw zvol_cdevsw = {
.d_name = "zvol",
.d_version = D_VERSION,
.d_flags = D_DISK | D_TRACKCLOSE,
.d_open = zvol_cdev_open,
.d_close = zvol_cdev_close,
.d_ioctl = zvol_cdev_ioctl,
.d_read = zvol_cdev_read,
.d_write = zvol_cdev_write,
.d_strategy = zvol_geom_bio_strategy,
+ .d_kqfilter = zvol_cdev_kqfilter,
+};
+
+static void zvol_filter_detach(struct knote *kn);
+static int zvol_filter_vnode(struct knote *kn, long hint);
+
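+/*
+ * Filter ops for EVFILT_VNODE knotes attached to volmode=dev zvols.
+ * Events (currently NOTE_ATTRIB on volume resize) are posted to
+ * zsd_selinfo.si_note.
+ */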
+static struct filterops zvol_filterops_vnode = {
+ .f_isfd = 1,
+ .f_detach = zvol_filter_detach,
+ .f_event = zvol_filter_vnode,
};
extern uint_t zfs_geom_probe_vdev_key;
struct g_class zfs_zvol_class = {
.name = "ZFS::ZVOL",
.version = G_VERSION,
};
DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_worker(void *arg);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
/* static d_strategy_t zvol_geom_bio_strategy; (declared elsewhere) */
/*
* GEOM mode implementation
*/
static int
zvol_geom_open(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
int err = 0;
boolean_t drop_suspend = B_FALSE;
if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
/*
* If zfs_geom_probe_vdev_key is set, that means that zfs is
* attempting to probe geom providers while looking for a
* replacement for a missing VDEV. In this case, the
* spa_namespace_lock will not be held, but it is still illegal
* to use a zvol as a vdev. Deadlocks can result if another
* thread has spa_namespace_lock.
*/
return (SET_ERROR(EOPNOTSUPP));
}
retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
/*
* Obtain a copy of private under zvol_state_lock to make sure either
* the result of zvol free code setting private to NULL is observed,
* or the zv is protected from being freed because of the positive
* zv_open_count.
*/
zv = pp->private;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_locked;
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_zv_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* Make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
if (zv->zv_open_count == 0) {
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
boolean_t drop_namespace = B_FALSE;
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Take spa_namespace_lock to prevent lock inversion when
* zvols from one pool are opened as vdevs in another.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
kern_yield(PRI_USER);
goto retry;
} else {
drop_namespace = B_TRUE;
}
}
err = zvol_first_open(zv, !(flag & FWRITE));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
goto out_zv_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if ((flag & FWRITE) && ((zv->zv_flags & ZVOL_RDONLY) ||
dmu_objset_incompatible_encryption_version(zv->zv_objset))) {
err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
if (flag & O_EXCL) {
if (zv->zv_open_count != 0) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
zv->zv_open_count += count;
out_opened:
if (zv->zv_open_count == 0) {
zvol_last_close(zv);
wakeup(zv);
}
out_zv_locked:
mutex_exit(&zv->zv_state_lock);
out_locked:
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
}
static int
zvol_geom_close(struct g_provider *pp, int flag, int count)
{
(void) flag;
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
int new_open_count;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
ASSERT3U(zv->zv_open_count, >, 0);
/*
* Make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
new_open_count = zv->zv_open_count - count;
if (new_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
new_open_count = zv->zv_open_count - count;
if (new_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* You may get multiple opens, but only one close.
*/
zv->zv_open_count = new_open_count;
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (0);
}
static void
zvol_geom_run(zvol_state_t *zv)
{
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_error_provider(pp, 0);
kproc_kthread_add(zvol_geom_worker, zv, &system_proc, NULL, 0, 0,
"zfskern", "zvol %s", pp->name + sizeof (ZVOL_DRIVER));
}
static void
zvol_geom_destroy(zvol_state_t *zv)
{
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_topology_assert();
mutex_enter(&zv->zv_state_lock);
VERIFY3S(zsg->zsg_state, ==, ZVOL_GEOM_RUNNING);
mutex_exit(&zv->zv_state_lock);
zsg->zsg_provider = NULL;
g_wither_geom(pp->geom, ENXIO);
}
void
zvol_wait_close(zvol_state_t *zv)
{
if (zv->zv_volmode != ZFS_VOLMODE_GEOM)
return;
mutex_enter(&zv->zv_state_lock);
zv->zv_zso->zso_dying = B_TRUE;
if (zv->zv_open_count)
msleep(zv, &zv->zv_state_lock,
PRIBIO, "zvol:dying", 10*hz);
mutex_exit(&zv->zv_state_lock);
}
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
int count, error, flags;
g_topology_assert();
/*
* To make it easier we expect either open or close, but not both
* at the same time.
*/
KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
(acr <= 0 && acw <= 0 && ace <= 0),
("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
pp->name, acr, acw, ace));
if (pp->private == NULL) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
return (pp->error);
}
/*
* We don't pass FEXCL flag to zvol_geom_open()/zvol_geom_close() if
* ace != 0, because GEOM already handles that and handles it a bit
* differently. GEOM allows for multiple read/exclusive consumers and
* ZFS allows only one exclusive consumer, no matter if it is reader or
* writer. I like better the way GEOM works so I'll leave it for GEOM
* to decide what to do.
*/
count = acr + acw + ace;
if (count == 0)
return (0);
flags = 0;
if (acr != 0 || ace != 0)
flags |= FREAD;
if (acw != 0)
flags |= FWRITE;
g_topology_unlock();
if (count > 0)
error = zvol_geom_open(pp, flags, count);
else
error = zvol_geom_close(pp, flags, -count);
g_topology_lock();
return (error);
}
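/*
 * Worker thread servicing bios that zvol_geom_bio_start() queued because the
 * calling context could not sleep.
 */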
static void
zvol_geom_worker(void *arg)
{
zvol_state_t *zv = arg;
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct bio *bp;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
thread_lock(curthread);
sched_prio(curthread, PRIBIO);
thread_unlock(curthread);
for (;;) {
mtx_lock(&zsg->zsg_queue_mtx);
bp = bioq_takefirst(&zsg->zsg_queue);
if (bp == NULL) {
if (zsg->zsg_state == ZVOL_GEOM_STOPPED) {
zsg->zsg_state = ZVOL_GEOM_RUNNING;
wakeup(&zsg->zsg_state);
mtx_unlock(&zsg->zsg_queue_mtx);
kthread_exit();
}
msleep(&zsg->zsg_queue, &zsg->zsg_queue_mtx,
PRIBIO | PDROP, "zvol:io", 0);
continue;
}
mtx_unlock(&zsg->zsg_queue_mtx);
zvol_geom_bio_strategy(bp);
}
}
static void
zvol_geom_bio_start(struct bio *bp)
{
zvol_state_t *zv = bp->bio_to->private;
struct zvol_state_geom *zsg;
boolean_t first;
if (zv == NULL) {
g_io_deliver(bp, ENXIO);
return;
}
if (bp->bio_cmd == BIO_GETATTR) {
if (zvol_geom_bio_getattr(bp))
g_io_deliver(bp, EOPNOTSUPP);
return;
}
if (!THREAD_CAN_SLEEP()) {
zsg = &zv->zv_zso->zso_geom;
mtx_lock(&zsg->zsg_queue_mtx);
first = (bioq_first(&zsg->zsg_queue) == NULL);
bioq_insert_tail(&zsg->zsg_queue, bp);
mtx_unlock(&zsg->zsg_queue_mtx);
if (first)
wakeup_one(&zsg->zsg_queue);
return;
}
zvol_geom_bio_strategy(bp);
}
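/*
 * Handle a BIO_GETATTR request.  Returns 0 when the attribute was handled and
 * nonzero otherwise, in which case the caller delivers EOPNOTSUPP.
 */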
static int
zvol_geom_bio_getattr(struct bio *bp)
{
zvol_state_t *zv;
zv = bp->bio_to->private;
ASSERT3P(zv, !=, NULL);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
uint64_t refd, avail, usedobjs, availobjs;
if (g_handleattr_int(bp, "GEOM::candelete", 1))
return (0);
if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksavail", avail / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksused", refd / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksavail",
avail / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksused", refd / DEV_BSIZE))
return (0);
}
return (1);
}
+static void
+zvol_filter_detach(struct knote *kn)
+{
+ zvol_state_t *zv;
+ struct zvol_state_dev *zsd;
+
+ zv = kn->kn_hook;
+ zsd = &zv->zv_zso->zso_dev;
+
+ knlist_remove(&zsd->zsd_selinfo.si_note, kn, 0);
+}
+
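+/*
+ * Latch only the events this knote subscribed to (kn_sfflags) and report
+ * the knote as active once any of them have fired.
+ */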
+static int
+zvol_filter_vnode(struct knote *kn, long hint)
+{
+ kn->kn_fflags |= kn->kn_sfflags & hint;
+
+ return (kn->kn_fflags != 0);
+}
+
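+/*
+ * d_kqfilter entry point: attach an EVFILT_VNODE knote (NOTE_ATTRIB only for
+ * now) to a volmode=dev zvol's knote list.
+ */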
+static int
+zvol_cdev_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ zvol_state_t *zv;
+ struct zvol_state_dev *zsd;
+
+ zv = dev->si_drv2;
+ zsd = &zv->zv_zso->zso_dev;
+
+ if (kn->kn_filter != EVFILT_VNODE)
+ return (EINVAL);
+
+ /* XXX: extend support for other NOTE_* events */
+ if (kn->kn_sfflags != NOTE_ATTRIB)
+ return (EINVAL);
+
+ kn->kn_fop = &zvol_filterops_vnode;
+ kn->kn_hook = zv;
+ knlist_add(&zsd->zsd_selinfo.si_note, kn, 0);
+
+ return (0);
+}
+
static void
zvol_geom_bio_strategy(struct bio *bp)
{
zvol_state_t *zv;
uint64_t off, volsize;
size_t resid;
char *addr;
objset_t *os;
zfs_locked_range_t *lr;
int error = 0;
boolean_t doread = B_FALSE;
boolean_t is_dumpified;
boolean_t sync;
if (bp->bio_to)
zv = bp->bio_to->private;
else
zv = bp->bio_dev->si_drv2;
if (zv == NULL) {
error = SET_ERROR(ENXIO);
goto out;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
switch (bp->bio_cmd) {
case BIO_READ:
doread = B_TRUE;
break;
case BIO_WRITE:
case BIO_FLUSH:
case BIO_DELETE:
if (zv->zv_flags & ZVOL_RDONLY) {
error = SET_ERROR(EROFS);
goto resume;
}
zvol_ensure_zilog(zv);
if (bp->bio_cmd == BIO_FLUSH)
goto sync;
break;
default:
error = SET_ERROR(EOPNOTSUPP);
goto resume;
}
off = bp->bio_offset;
volsize = zv->zv_volsize;
os = zv->zv_objset;
ASSERT3P(os, !=, NULL);
addr = bp->bio_data;
resid = bp->bio_length;
if (resid > 0 && off >= volsize) {
error = SET_ERROR(EIO);
goto resume;
}
is_dumpified = B_FALSE;
sync = !doread && !is_dumpified &&
zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
/*
* There must be no buffer changes when doing a dmu_sync() because
* we can't change the data whilst calculating the checksum.
*/
lr = zfs_rangelock_enter(&zv->zv_rangelock, off, resid,
doread ? RL_READER : RL_WRITER);
if (bp->bio_cmd == BIO_DELETE) {
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, off, resid, sync);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
off, resid);
resid = 0;
}
goto unlock;
}
while (resid != 0 && off < volsize) {
size_t size = MIN(resid, zvol_maxphys);
if (doread) {
error = dmu_read(os, ZVOL_OBJ, off, size, addr,
DMU_READ_PREFETCH);
} else {
dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
zvol_log_write(zv, tx, off, size, sync);
dmu_tx_commit(tx);
}
}
if (error) {
/* Convert checksum errors into IO errors. */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
off += size;
addr += size;
resid -= size;
}
unlock:
zfs_rangelock_exit(lr);
bp->bio_completed = bp->bio_length - resid;
if (bp->bio_completed < bp->bio_length && off > volsize)
error = SET_ERROR(EINVAL);
switch (bp->bio_cmd) {
case BIO_FLUSH:
break;
case BIO_READ:
dataset_kstats_update_read_kstats(&zv->zv_kstat,
bp->bio_completed);
break;
case BIO_WRITE:
dataset_kstats_update_write_kstats(&zv->zv_kstat,
bp->bio_completed);
break;
case BIO_DELETE:
break;
default:
break;
}
if (sync) {
sync:
zil_commit(zv->zv_zilog, ZVOL_OBJ);
}
resume:
rw_exit(&zv->zv_suspend_lock);
out:
if (bp->bio_to)
g_io_deliver(bp, error);
else
biofinish(bp, NULL, error);
}
/*
* Character device mode implementation
*/
static int
zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
{
zvol_state_t *zv;
uint64_t volsize;
zfs_locked_range_t *lr;
int error = 0;
zfs_uio_t uio;
zfs_uio_init(&uio, uio_s);
zv = dev->si_drv2;
volsize = zv->zv_volsize;
/*
* uio_loffset == volsize isn't an error as
* it's required for EOF processing.
*/
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO));
ssize_t start_resid = zfs_uio_resid(&uio);
lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
zfs_uio_resid(&uio), RL_READER);
while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
/* Don't read past the end. */
if (bytes > volsize - zfs_uio_offset(&uio))
bytes = volsize - zfs_uio_offset(&uio);
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* Convert checksum errors into IO errors. */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
return (error);
}
static int
zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
{
zvol_state_t *zv;
uint64_t volsize;
zfs_locked_range_t *lr;
int error = 0;
boolean_t sync;
zfs_uio_t uio;
zv = dev->si_drv2;
volsize = zv->zv_volsize;
zfs_uio_init(&uio, uio_s);
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO));
ssize_t start_resid = zfs_uio_resid(&uio);
sync = (ioflag & IO_SYNC) ||
(zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
zfs_uio_resid(&uio), RL_WRITER);
while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
uint64_t off = zfs_uio_offset(&uio);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* Don't write past the end. */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0)
zvol_log_write(zv, tx, off, bytes, sync);
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
return (error);
}
static int
zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
int err = 0;
boolean_t drop_suspend = B_FALSE;
retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
/*
* Obtain a copy of si_drv2 under zvol_state_lock to make sure either
* the result of zvol free code setting si_drv2 to NULL is observed,
* or the zv is protected from being freed because of the positive
* zv_open_count.
*/
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_locked;
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_zv_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* Make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
if (zv->zv_open_count == 0) {
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
boolean_t drop_namespace = B_FALSE;
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Take spa_namespace_lock to prevent lock inversion when
* zvols from one pool are opened as vdevs in another.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
kern_yield(PRI_USER);
goto retry;
} else {
drop_namespace = B_TRUE;
}
}
err = zvol_first_open(zv, !(flags & FWRITE));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
goto out_zv_locked;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
if (flags & O_EXCL) {
if (zv->zv_open_count != 0) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
zv->zv_open_count++;
if (flags & O_SYNC) {
zsd = &zv->zv_zso->zso_dev;
zsd->zsd_sync_cnt++;
if (zsd->zsd_sync_cnt == 1 &&
(zv->zv_flags & ZVOL_WRITTEN_TO) != 0)
zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
}
out_opened:
if (zv->zv_open_count == 0) {
zvol_last_close(zv);
wakeup(zv);
}
out_zv_locked:
mutex_exit(&zv->zv_state_lock);
out_locked:
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
}
static int
zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
ASSERT3U(zv->zv_open_count, >, 0);
/*
* Make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* You may get multiple opens, but only one close.
*/
zv->zv_open_count--;
if (flags & O_SYNC) {
zsd = &zv->zv_zso->zso_dev;
zsd->zsd_sync_cnt--;
}
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (0);
}
static int
zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
int fflag, struct thread *td)
{
zvol_state_t *zv;
zfs_locked_range_t *lr;
off_t offset, length;
int error;
boolean_t sync;
zv = dev->si_drv2;
error = 0;
KASSERT(zv->zv_open_count > 0,
("Device with zero access count in %s", __func__));
switch (cmd) {
case DIOCGSECTORSIZE:
*(uint32_t *)data = DEV_BSIZE;
break;
case DIOCGMEDIASIZE:
*(off_t *)data = zv->zv_volsize;
break;
case DIOCGFLUSH:
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
if (zv->zv_zilog != NULL)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGDELETE:
if (!zvol_unmap_enabled)
break;
offset = ((off_t *)data)[0];
length = ((off_t *)data)[1];
if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
offset < 0 || offset >= zv->zv_volsize ||
length <= 0) {
printf("%s: offset=%jd length=%jd\n", __func__, offset,
length);
error = SET_ERROR(EINVAL);
break;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
RL_WRITER);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
sync = FALSE;
dmu_tx_abort(tx);
} else {
sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
zvol_log_truncate(zv, tx, offset, length, sync);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
offset, length);
}
zfs_rangelock_exit(lr);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGSTRIPESIZE:
*(off_t *)data = zv->zv_volblocksize;
break;
case DIOCGSTRIPEOFFSET:
*(off_t *)data = 0;
break;
case DIOCGATTR: {
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
if (strcmp(arg->name, "GEOM::candelete") == 0)
arg->value.i = 1;
else if (strcmp(arg->name, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = refd / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(
spa_normal_class(spa));
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = refd / DEV_BSIZE;
} else
error = SET_ERROR(ENOIOCTL);
break;
}
case FIOSEEKHOLE:
case FIOSEEKDATA: {
off_t *off = (off_t *)data;
uint64_t noff;
boolean_t hole;
hole = (cmd == FIOSEEKHOLE);
noff = *off;
error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
*off = noff;
break;
}
default:
error = SET_ERROR(ENOIOCTL);
}
return (error);
}
/*
* Misc. helpers
*/
static void
zvol_ensure_zilog(zvol_state_t *zv)
{
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
if (!rw_tryupgrade(&zv->zv_suspend_lock)) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
}
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data, &zv->zv_kstat.dk_zil_sums);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_os_create_minor() */
VERIFY0(zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED);
}
rw_downgrade(&zv->zv_suspend_lock);
}
}
boolean_t
zvol_os_is_zvol(const char *device)
{
return (device && strncmp(device, ZVOL_DIR, strlen(ZVOL_DIR)) == 0);
}
void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/* Move to a new hashtable entry. */
zv->zv_hash = zvol_name_hash(zv->zv_name);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
struct g_geom *gp;
g_topology_lock();
gp = pp->geom;
ASSERT3P(gp, !=, NULL);
zsg->zsg_provider = NULL;
g_wither_provider(pp, ENXIO);
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = zv->zv_volsize;
pp->private = zv;
zsg->zsg_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
dev = zsd->zsd_cdev;
if (dev != NULL) {
destroy_dev(dev);
dev = zsd->zsd_cdev = NULL;
if (zv->zv_open_count > 0) {
zv->zv_flags &= ~ZVOL_EXCL;
zv->zv_open_count = 0;
/* XXX need suspend lock but lock order */
zvol_last_close(zv);
}
}
make_dev_args_init(&args);
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_devsw = &zvol_cdevsw;
args.mda_cr = NULL;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_OPERATOR;
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, newname)
== 0) {
#if __FreeBSD_version > 1300130
dev->si_iosize_max = maxphys;
#else
dev->si_iosize_max = MAXPHYS;
#endif
zsd->zsd_cdev = dev;
}
}
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
}
/*
* Remove minor node for the specified volume.
*/
void
zvol_os_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp __maybe_unused = zsg->zsg_provider;
ASSERT3P(pp->private, ==, NULL);
g_topology_lock();
zvol_geom_destroy(zv);
g_topology_unlock();
mtx_destroy(&zsg->zsg_queue_mtx);
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL) {
ASSERT3P(dev->si_drv2, ==, NULL);
destroy_dev(dev);
+ knlist_clear(&zsd->zsd_selinfo.si_note, 0);
+ knlist_destroy(&zsd->zsd_selinfo.si_note);
}
}
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
zvol_minors--;
}
/*
* Create a minor node (plus a whole lot more) for the specified volume.
*/
int
zvol_os_create_minor(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t volmode, hash;
int error;
ZFS_LOG(1, "Creating ZVOL %s...", name);
hash = zvol_name_hash(name);
if ((zv = zvol_find_by_name_hash(name, hash, RW_NONE)) != NULL) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(EEXIST));
}
DROP_GIANT();
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
/* Lie and say we're read-only. */
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_VOLMODE), &volmode, NULL);
if (error || volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
error = 0;
/*
* zvol_alloc equivalent ...
*/
zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
zv->zv_hash = hash;
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
zv->zv_zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_volmode = volmode;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp;
struct g_geom *gp;
zsg->zsg_state = ZVOL_GEOM_UNINIT;
mtx_init(&zsg->zsg_queue_mtx, "zvol", NULL, MTX_DEF);
g_topology_lock();
gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
gp->start = zvol_geom_bio_start;
gp->access = zvol_geom_access;
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = 0;
pp->private = zv;
zsg->zsg_provider = pp;
bioq_init(&zsg->zsg_queue);
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
make_dev_args_init(&args);
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_devsw = &zvol_cdevsw;
args.mda_cr = NULL;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_OPERATOR;
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, name)
== 0) {
#if __FreeBSD_version > 1300130
dev->si_iosize_max = maxphys;
#else
dev->si_iosize_max = MAXPHYS;
#endif
zsd->zsd_cdev = dev;
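+ /* Knote list consumed by zvol_cdev_kqfilter() listeners. */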
+ knlist_init_sx(&zsd->zsd_selinfo.si_note,
+ &zv->zv_state_lock);
}
}
(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
zil_destroy(zv->zv_zilog, B_FALSE);
else
zil_replay(os, zv, zvol_replay_vector);
}
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
/* TODO: prefetch for geom tasting */
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
if (error == 0 && volmode == ZFS_VOLMODE_GEOM) {
zvol_geom_run(zv);
g_topology_unlock();
}
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
zvol_minors++;
rw_exit(&zvol_state_lock);
ZFS_LOG(1, "ZVOL %s created.", name);
}
PICKUP_GIANT();
return (error);
}
void
zvol_os_clear_private(zvol_state_t *zv)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
if (pp->private == NULL) /* already cleared */
return;
mtx_lock(&zsg->zsg_queue_mtx);
zsg->zsg_state = ZVOL_GEOM_STOPPED;
pp->private = NULL;
wakeup_one(&zsg->zsg_queue);
while (zsg->zsg_state != ZVOL_GEOM_RUNNING)
msleep(&zsg->zsg_state, &zsg->zsg_queue_mtx,
0, "zvol:w", 0);
mtx_unlock(&zsg->zsg_queue_mtx);
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL)
dev->si_drv2 = NULL;
}
}
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
zv->zv_volsize = volsize;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
g_topology_lock();
if (pp->private == NULL) {
g_topology_unlock();
return (SET_ERROR(ENXIO));
}
/*
* Do not raise a resize event when the initial size was zero.
* ZVOL initializes the size on first open; this is not
* real resizing.
*/
if (pp->mediasize == 0)
pp->mediasize = zv->zv_volsize;
else
g_resize_provider(pp, zv->zv_volsize);
g_topology_unlock();
+ } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
+ struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+
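+ /* Notify kqueue EVFILT_VNODE listeners that the volume size changed. */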
+ KNOTE_UNLOCKED(&zsd->zsd_selinfo.si_note, NOTE_ATTRIB);
}
return (0);
}
void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{
// XXX? set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{
// XXX? set_capacity(zv->zv_zso->zvo_disk, capacity);
}
/*
* Public interfaces
*/
int
zvol_busy(void)
{
return (zvol_minors != 0);
}
int
zvol_init(void)
{
zvol_init_impl();
return (0);
}
void
zvol_fini(void)
{
zvol_fini_impl();
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c b/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
index 5179100d1665..de91c44257aa 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
@@ -1,826 +1,834 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Generic Implementation.
*/
#include <sys/sysmacros.h>
#include <sys/systeminfo.h>
#include <sys/vmsystm.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/taskq.h>
#include <sys/tsd.h>
#include <sys/zmod.h>
#include <sys/debug.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/file.h>
#include <sys/sunddi.h>
#include <linux/ctype.h>
#include <sys/disp.h>
#include <sys/random.h>
#include <sys/string.h>
#include <linux/kmod.h>
#include <linux/mod_compat.h>
#include <sys/cred.h>
#include <sys/vnode.h>
unsigned long spl_hostid = 0;
EXPORT_SYMBOL(spl_hostid);
/* CSTYLED */
module_param(spl_hostid, ulong, 0644);
MODULE_PARM_DESC(spl_hostid, "The system hostid.");
proc_t p0;
EXPORT_SYMBOL(p0);
/*
* Xorshift Pseudo Random Number Generator based on work by Sebastiano Vigna
*
* "Further scramblings of Marsaglia's xorshift generators"
* http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
*
* random_get_pseudo_bytes() is an API function on Illumos whose sole purpose
* is to provide bytes containing random numbers. It is mapped to /dev/urandom
* on Illumos, which uses a "FIPS 186-2 algorithm". No user of the SPL's
* random_get_pseudo_bytes() needs bytes that are of cryptographic quality, so
* we can implement it using a fast PRNG that we seed using Linux' actual
* equivalent to random_get_pseudo_bytes(). We do this by providing each CPU
* with an independent seed so that all calls to random_get_pseudo_bytes() are
* free of atomic instructions.
*
* A consequence of using a fast PRNG is that using random_get_pseudo_bytes()
* to generate words larger than 128 bits will paradoxically be limited to
* `2^128 - 1` possibilities. This is because we have a sequence of `2^128 - 1`
* 128-bit words and selecting the first will implicitly select the second. If
* a caller finds this behavior undesirable, random_get_bytes() should be used
* instead.
*
* XXX: Linux interrupt handlers that trigger within the critical section
* formed by `s[1] = xp[1];` and `xp[0] = s[0];` and call this function will
* see the same numbers. Nothing in the code currently calls this in an
* interrupt handler, so this is considered to be okay. If that becomes a
* problem, we could create a set of per-cpu variables for interrupt handlers
* and use them when in_interrupt() from linux/preempt_mask.h evaluates to
* true.
*/
void __percpu *spl_pseudo_entropy;
/*
* spl_rand_next()/spl_rand_jump() are copied from the following CC-0 licensed
* file:
*
* http://xorshift.di.unimi.it/xorshift128plus.c
*/
static inline uint64_t
spl_rand_next(uint64_t *s)
{
uint64_t s1 = s[0];
const uint64_t s0 = s[1];
s[0] = s0;
s1 ^= s1 << 23; // a
s[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c
return (s[1] + s0);
}
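/*
 * xorshift128+ jump function: equivalent to 2^64 calls of spl_rand_next(),
 * used to give each CPU a non-overlapping subsequence (see spl_random_init()).
 */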
static inline void
spl_rand_jump(uint64_t *s)
{
static const uint64_t JUMP[] =
{ 0x8a5cd789635d2dff, 0x121fd2155c472f96 };
uint64_t s0 = 0;
uint64_t s1 = 0;
int i, b;
for (i = 0; i < sizeof (JUMP) / sizeof (*JUMP); i++)
for (b = 0; b < 64; b++) {
if (JUMP[i] & 1ULL << b) {
s0 ^= s[0];
s1 ^= s[1];
}
(void) spl_rand_next(s);
}
s[0] = s0;
s[1] = s1;
}
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
uint64_t *xp, s[2];
ASSERT(ptr);
xp = get_cpu_ptr(spl_pseudo_entropy);
s[0] = xp[0];
s[1] = xp[1];
while (len) {
union {
uint64_t ui64;
uint8_t byte[sizeof (uint64_t)];
} entropy;
int i = MIN(len, sizeof (uint64_t));
len -= i;
entropy.ui64 = spl_rand_next(s);
while (i--)
*ptr++ = entropy.byte[i];
}
xp[0] = s[0];
xp[1] = s[1];
put_cpu_ptr(spl_pseudo_entropy);
return (0);
}
EXPORT_SYMBOL(random_get_pseudo_bytes);
#if BITS_PER_LONG == 32
/*
* Support 64/64 => 64 division on a 32-bit platform. While the kernel
* provides a div64_u64() function for this we do not use it because the
* implementation is flawed. There are cases which return incorrect
* results as late as linux-2.6.35. Until this is fixed upstream the
* spl must provide its own implementation.
*
* This implementation is a slightly modified version of the algorithm
* proposed by the book 'Hacker's Delight'. The original source can be
* found here and is available for use without restriction.
*
* http://www.hackersdelight.org/HDcode/newCode/divDouble.c
*/
/*
* Calculate number of leading of zeros for a 64-bit value.
*/
static int
nlz64(uint64_t x)
{
register int n = 0;
if (x == 0)
return (64);
if (x <= 0x00000000FFFFFFFFULL) { n = n + 32; x = x << 32; }
if (x <= 0x0000FFFFFFFFFFFFULL) { n = n + 16; x = x << 16; }
if (x <= 0x00FFFFFFFFFFFFFFULL) { n = n + 8; x = x << 8; }
if (x <= 0x0FFFFFFFFFFFFFFFULL) { n = n + 4; x = x << 4; }
if (x <= 0x3FFFFFFFFFFFFFFFULL) { n = n + 2; x = x << 2; }
if (x <= 0x7FFFFFFFFFFFFFFFULL) { n = n + 1; }
return (n);
}
/*
* Newer kernels have a div_u64() function but we define our own
* to simplify portability between kernel versions.
*/
static inline uint64_t
__div_u64(uint64_t u, uint32_t v)
{
(void) do_div(u, v);
return (u);
}
/*
* Turn off missing prototypes warning for these functions. They are
* replacements for libgcc-provided functions and will never be called
* directly.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-prototypes"
/*
* Implementation of 64-bit unsigned division for 32-bit machines.
*
* First the procedure takes care of the case in which the divisor is a
* 32-bit quantity. There are two subcases: (1) If the left half of the
* dividend is less than the divisor, one execution of do_div() is all that
* is required (overflow is not possible). (2) Otherwise it does two
* divisions, using the grade school method.
*/
uint64_t
__udivdi3(uint64_t u, uint64_t v)
{
uint64_t u0, u1, v1, q0, q1, k;
int n;
if (v >> 32 == 0) { // If v < 2**32:
if (u >> 32 < v) { // If u/v cannot overflow,
return (__div_u64(u, v)); // just do one division.
} else { // If u/v would overflow:
u1 = u >> 32; // Break u into two halves.
u0 = u & 0xFFFFFFFF;
q1 = __div_u64(u1, v); // First quotient digit.
k = u1 - q1 * v; // First remainder, < v.
u0 += (k << 32);
q0 = __div_u64(u0, v); // Second quotient digit.
return ((q1 << 32) + q0);
}
} else { // If v >= 2**32:
n = nlz64(v); // 0 <= n <= 31.
v1 = (v << n) >> 32; // Normalize divisor, MSB is 1.
u1 = u >> 1; // To ensure no overflow.
q1 = __div_u64(u1, v1); // Get quotient from
q0 = (q1 << n) >> 31; // Undo normalization and
// division of u by 2.
if (q0 != 0) // Make q0 correct or
q0 = q0 - 1; // too small by 1.
if ((u - q0 * v) >= v)
q0 = q0 + 1; // Now q0 is correct.
return (q0);
}
}
EXPORT_SYMBOL(__udivdi3);
#ifndef abs64
/* CSTYLED */
#define abs64(x) ({ uint64_t t = (x) >> 63; ((x) ^ t) - t; })
#endif
/*
* Implementation of 64-bit signed division for 32-bit machines.
*/
int64_t
__divdi3(int64_t u, int64_t v)
{
int64_t q, t;
q = __udivdi3(abs64(u), abs64(v));
t = (u ^ v) >> 63; // If u, v have different
return ((q ^ t) - t); // signs, negate q.
}
EXPORT_SYMBOL(__divdi3);
/*
* Implementation of 64-bit unsigned modulo for 32-bit machines.
*/
uint64_t
__umoddi3(uint64_t dividend, uint64_t divisor)
{
return (dividend - (divisor * __udivdi3(dividend, divisor)));
}
EXPORT_SYMBOL(__umoddi3);
/* 64-bit signed modulo for 32-bit machines. */
int64_t
__moddi3(int64_t n, int64_t d)
{
int64_t q;
boolean_t nn = B_FALSE;
if (n < 0) {
nn = B_TRUE;
n = -n;
}
if (d < 0)
d = -d;
q = __umoddi3(n, d);
return (nn ? -q : q);
}
EXPORT_SYMBOL(__moddi3);
/*
* Implementation of 64-bit unsigned division/modulo for 32-bit machines.
*/
uint64_t
__udivmoddi4(uint64_t n, uint64_t d, uint64_t *r)
{
uint64_t q = __udivdi3(n, d);
if (r)
*r = n - d * q;
return (q);
}
EXPORT_SYMBOL(__udivmoddi4);
/*
* Implementation of 64-bit signed division/modulo for 32-bit machines.
*/
int64_t
__divmoddi4(int64_t n, int64_t d, int64_t *r)
{
int64_t q, rr;
boolean_t nn = B_FALSE;
boolean_t nd = B_FALSE;
if (n < 0) {
nn = B_TRUE;
n = -n;
}
if (d < 0) {
nd = B_TRUE;
d = -d;
}
q = __udivmoddi4(n, d, (uint64_t *)&rr);
if (nn != nd)
q = -q;
if (nn)
rr = -rr;
if (r)
*r = rr;
return (q);
}
EXPORT_SYMBOL(__divmoddi4);
#if defined(__arm) || defined(__arm__)
/*
* Implementation of 64-bit (un)signed division for 32-bit arm machines.
*
* Run-time ABI for the ARM Architecture (page 20). A pair of (unsigned)
* long longs is returned in {{r0, r1}, {r2,r3}}, the quotient in {r0, r1},
* and the remainder in {r2, r3}. The return type is specifically left
* set to 'void' to ensure the compiler does not overwrite these registers
* during the return. All results are in registers as per ABI
*/
void
__aeabi_uldivmod(uint64_t u, uint64_t v)
{
uint64_t res;
uint64_t mod;
res = __udivdi3(u, v);
mod = __umoddi3(u, v);
{
register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF);
register uint32_t r1 asm("r1") = (res >> 32);
register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF);
register uint32_t r3 asm("r3") = (mod >> 32);
asm volatile(""
: "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */
: "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */
return; /* r0; */
}
}
EXPORT_SYMBOL(__aeabi_uldivmod);
void
__aeabi_ldivmod(int64_t u, int64_t v)
{
int64_t res;
uint64_t mod;
res = __divdi3(u, v);
mod = __umoddi3(u, v);
{
register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF);
register uint32_t r1 asm("r1") = (res >> 32);
register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF);
register uint32_t r3 asm("r3") = (mod >> 32);
asm volatile(""
: "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */
: "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */
return; /* r0; */
}
}
EXPORT_SYMBOL(__aeabi_ldivmod);
#endif /* __arm || __arm__ */
#pragma GCC diagnostic pop
#endif /* BITS_PER_LONG */
/*
* NOTE: The strtoxx behavior is solely based on my reading of the Solaris
* ddi_strtol(9F) man page. I have not verified the behavior of these
* functions against their Solaris counterparts. It is possible that I
* may have misinterpreted the man page or the man page is incorrect.
*/
int ddi_strtol(const char *, char **, int, long *);
int ddi_strtoull(const char *, char **, int, unsigned long long *);
int ddi_strtoll(const char *, char **, int, long long *);
#define define_ddi_strtox(type, valtype) \
int ddi_strto##type(const char *str, char **endptr, \
int base, valtype *result) \
{ \
valtype last_value, value = 0; \
char *ptr = (char *)str; \
int digit, minus = 0; \
\
while (strchr(" \t\n\r\f", *ptr)) \
++ptr; \
\
if (strlen(ptr) == 0) \
return (EINVAL); \
\
switch (*ptr) { \
case '-': \
minus = 1; \
zfs_fallthrough; \
case '+': \
++ptr; \
break; \
} \
\
/* Auto-detect base based on prefix */ \
if (!base) { \
if (str[0] == '0') { \
if (tolower(str[1]) == 'x' && isxdigit(str[2])) { \
base = 16; /* hex */ \
ptr += 2; \
} else if (str[1] >= '0' && str[1] < '8') { \
base = 8; /* octal */ \
ptr += 1; \
} else { \
return (EINVAL); \
} \
} else { \
base = 10; /* decimal */ \
} \
} \
\
while (1) { \
if (isdigit(*ptr)) \
digit = *ptr - '0'; \
else if (isalpha(*ptr)) \
digit = tolower(*ptr) - 'a' + 10; \
else \
break; \
\
if (digit >= base) \
break; \
\
last_value = value; \
value = value * base + digit; \
if (last_value > value) /* Overflow */ \
return (ERANGE); \
\
ptr++; \
} \
\
*result = minus ? -value : value; \
\
if (endptr) \
*endptr = ptr; \
\
return (0); \
} \
define_ddi_strtox(l, long)
define_ddi_strtox(ull, unsigned long long)
define_ddi_strtox(ll, long long)
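/*
 * Example (illustrative): ddi_strtol("0x1b", NULL, 0, &val) auto-detects the
 * hex prefix and stores 27 in val, returning 0.
 */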
EXPORT_SYMBOL(ddi_strtol);
EXPORT_SYMBOL(ddi_strtoll);
EXPORT_SYMBOL(ddi_strtoull);
int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
/* Fake ioctl() issued by kernel, 'from' is a kernel address */
if (flags & FKIOCTL) {
memcpy(to, from, len);
return (0);
}
return (copyin(from, to, len));
}
EXPORT_SYMBOL(ddi_copyin);
int
ddi_copyout(const void *from, void *to, size_t len, int flags)
{
/* Fake ioctl() issued by kernel, 'from' is a kernel address */
if (flags & FKIOCTL) {
memcpy(to, from, len);
return (0);
}
return (copyout(from, to, len));
}
EXPORT_SYMBOL(ddi_copyout);
static ssize_t
spl_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
#if defined(HAVE_KERNEL_READ_PPOS)
return (kernel_read(file, buf, count, pos));
#else
mm_segment_t saved_fs;
ssize_t ret;
saved_fs = get_fs();
set_fs(KERNEL_DS);
ret = vfs_read(file, (void __user *)buf, count, pos);
set_fs(saved_fs);
return (ret);
#endif
}
static int
spl_getattr(struct file *filp, struct kstat *stat)
{
int rc;
ASSERT(filp);
ASSERT(stat);
#if defined(HAVE_4ARGS_VFS_GETATTR)
rc = vfs_getattr(&filp->f_path, stat, STATX_BASIC_STATS,
AT_STATX_SYNC_AS_STAT);
#elif defined(HAVE_2ARGS_VFS_GETATTR)
rc = vfs_getattr(&filp->f_path, stat);
#elif defined(HAVE_3ARGS_VFS_GETATTR)
rc = vfs_getattr(filp->f_path.mnt, filp->f_dentry, stat);
#else
#error "No available vfs_getattr()"
#endif
if (rc)
return (-rc);
return (0);
}
/*
* Read the unique system identifier from the /etc/hostid file.
*
* The behavior of /usr/bin/hostid on Linux systems with the
* regular eglibc and coreutils is:
*
* 1. Generate the value if the /etc/hostid file does not exist
* or if the /etc/hostid file is less than four bytes in size.
*
* 2. If the /etc/hostid file is at least 4 bytes, then return
* the first four bytes [0..3] in native endian order.
*
* 3. Always ignore bytes [4..] if they exist in the file.
*
* Only the first four bytes are significant, even on systems that
* have a 64-bit word size.
*
* See:
*
* eglibc: sysdeps/unix/sysv/linux/gethostid.c
* coreutils: src/hostid.c
*
* Notes:
*
* The /etc/hostid file on Solaris is a text file that often reads:
*
* # DO NOT EDIT
* "0123456789"
*
* Directly copying this file to Linux results in a constant
* hostid of 4f442023 because the default comment constitutes
* the first four bytes of the file.
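*
* For example (illustrative): a binary /etc/hostid whose first four bytes are
* de ad be ef yields the hostid 0xefbeadde on a little-endian system.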
*
*/
static char *spl_hostid_path = HW_HOSTID_PATH;
module_param(spl_hostid_path, charp, 0444);
MODULE_PARM_DESC(spl_hostid_path, "The system hostid file (/etc/hostid)");
static int
hostid_read(uint32_t *hostid)
{
uint64_t size;
uint32_t value = 0;
int error;
loff_t off;
struct file *filp;
struct kstat stat;
filp = filp_open(spl_hostid_path, 0, 0);
if (IS_ERR(filp))
return (ENOENT);
error = spl_getattr(filp, &stat);
if (error) {
filp_close(filp, 0);
return (error);
}
size = stat.size;
// cppcheck-suppress sizeofwithnumericparameter
if (size < sizeof (HW_HOSTID_MASK)) {
filp_close(filp, 0);
return (EINVAL);
}
off = 0;
/*
* Read directly into the variable like eglibc does.
* Short reads are okay; native behavior is preserved.
*/
error = spl_kernel_read(filp, &value, sizeof (value), &off);
if (error < 0) {
filp_close(filp, 0);
return (EIO);
}
/* Mask down to 32 bits like coreutils does. */
*hostid = (value & HW_HOSTID_MASK);
filp_close(filp, 0);
return (0);
}
/*
* Return the system hostid. Preferentially use the spl_hostid module option
* when set, otherwise use the value in the /etc/hostid file.
*/
uint32_t
zone_get_hostid(void *zone)
{
uint32_t hostid;
ASSERT3P(zone, ==, NULL);
if (spl_hostid != 0)
return ((uint32_t)(spl_hostid & HW_HOSTID_MASK));
if (hostid_read(&hostid) == 0)
return (hostid);
return (0);
}
EXPORT_SYMBOL(zone_get_hostid);
static int
spl_kvmem_init(void)
{
int rc = 0;
rc = spl_kmem_init();
if (rc)
return (rc);
rc = spl_vmem_init();
if (rc) {
spl_kmem_fini();
return (rc);
}
return (rc);
}
/*
* We initialize the random number generator with 128 bits of entropy from the
* system random number generator. In the improbable case that we have a zero
* seed, we fall back to the system jiffies, unless that is also zero, in which
* case we use a preprogrammed seed. We step forward by 2^64 iterations to
* initialize each of the per-cpu seeds so that the sequences generated on each
* CPU are guaranteed to never overlap in practice.
*/
-static void __init
+static int __init
spl_random_init(void)
{
uint64_t s[2];
int i = 0;
spl_pseudo_entropy = __alloc_percpu(2 * sizeof (uint64_t),
sizeof (uint64_t));
+ if (!spl_pseudo_entropy)
+ return (-ENOMEM);
+
get_random_bytes(s, sizeof (s));
if (s[0] == 0 && s[1] == 0) {
if (jiffies != 0) {
s[0] = jiffies;
s[1] = ~0 - jiffies;
} else {
(void) memcpy(s, "improbable seed", sizeof (s));
}
printk("SPL: get_random_bytes() returned 0 "
"when generating random seed. Setting initial seed to "
"0x%016llx%016llx.\n", cpu_to_be64(s[0]),
cpu_to_be64(s[1]));
}
for_each_possible_cpu(i) {
uint64_t *wordp = per_cpu_ptr(spl_pseudo_entropy, i);
spl_rand_jump(s);
wordp[0] = s[0];
wordp[1] = s[1];
}
+
+ return (0);
}
static void
spl_random_fini(void)
{
free_percpu(spl_pseudo_entropy);
}
static void
spl_kvmem_fini(void)
{
spl_vmem_fini();
spl_kmem_fini();
}
static int __init
spl_init(void)
{
int rc = 0;
- spl_random_init();
+ if ((rc = spl_random_init()))
+ goto out0;
if ((rc = spl_kvmem_init()))
goto out1;
if ((rc = spl_tsd_init()))
goto out2;
if ((rc = spl_taskq_init()))
goto out3;
if ((rc = spl_kmem_cache_init()))
goto out4;
if ((rc = spl_proc_init()))
goto out5;
if ((rc = spl_kstat_init()))
goto out6;
if ((rc = spl_zlib_init()))
goto out7;
if ((rc = spl_zone_init()))
goto out8;
return (rc);
out8:
spl_zlib_fini();
out7:
spl_kstat_fini();
out6:
spl_proc_fini();
out5:
spl_kmem_cache_fini();
out4:
spl_taskq_fini();
out3:
spl_tsd_fini();
out2:
spl_kvmem_fini();
out1:
+ spl_random_fini();
+out0:
return (rc);
}
static void __exit
spl_fini(void)
{
spl_zone_fini();
spl_zlib_fini();
spl_kstat_fini();
spl_proc_fini();
spl_kmem_cache_fini();
spl_taskq_fini();
spl_tsd_fini();
spl_kvmem_fini();
spl_random_fini();
}
module_init(spl_init);
module_exit(spl_fini);
MODULE_DESCRIPTION("Solaris Porting Layer");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
index ba4ca49a2ac9..efb8d0c30330 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
@@ -1,1462 +1,1465 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
/*
* Within the scope of this file the kmem_cache_* definitions
* are removed to allow access to the real Linux slab allocator.
*/
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
* Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
* with smp_mb__{before,after}_atomic() because they were redundant. This is
* only used inside our SLAB allocator, so we implement an internal wrapper
* here to give us smp_mb__{before,after}_atomic() on older kernels.
*/
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
/* BEGIN CSTYLED */
/*
* Cache magazines are an optimization designed to minimize the cost of
* allocating memory. They do this by keeping a per-cpu cache of recently
* freed objects, which can then be reallocated without taking a lock. This
* can improve performance on highly contended caches. However, because
* objects in magazines will prevent otherwise empty slabs from being
* immediately released, this may not be ideal for low memory machines.
*
* For this reason spl_kmem_cache_magazine_size can be used to set a maximum
* magazine size. When this value is set to 0 the magazine size will be
* automatically determined based on the object size. Otherwise magazines
* will be limited to 2-256 objects per magazine (i.e. per CPU). Magazines
* may never be entirely disabled in this implementation.
*/
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
"Default magazine size (2-256), set automatically (0)");
/*
* The default behavior is to report the number of objects remaining in the
* cache. This allows the Linux VM to repeatedly reclaim objects from the
* cache when memory is low to satisfy other memory allocations. Alternately,
* setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
* is reclaimed. This may increase the likelihood of out of memory events.
*/
static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
* For small objects the Linux slab allocator should be used to make the most
* efficient use of the memory. However, large objects are not supported by
* the Linux slab and therefore the SPL implementation is preferred. A cutoff
* of 16K was determined to be optimal for architectures using 4K pages and
* to also work well on architectures using larger 64K page sizes.
*/
static unsigned int spl_kmem_cache_slab_limit = 16384;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
"Objects less than N bytes use the Linux slab");
/*
* The number of threads available to allocate new slabs for caches. This
* should not need to be tuned but it is available for performance analysis.
*/
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
"Number of spl_kmem_cache threads");
/* END CSTYLED */
/*
* Slab allocation interfaces
*
* While the Linux slab implementation was inspired by the Solaris
* implementation I cannot use it to emulate the Solaris APIs. I
* require two features which are not provided by the Linux slab.
*
* 1) Constructors AND destructors. Recent versions of the Linux
* kernel have removed support for destructors. This is a deal
* breaker for the SPL which contains particularly expensive
* initializers for mutexes, condition variables, etc. We also
* require a minimal level of cleanup for these data types unlike
* many Linux data types which do not need to be explicitly destroyed.
*
* 2) Virtual address space backed slab. Callers of the Solaris slab
* expect it to work well for both small and very large allocations.
* Because of memory fragmentation the Linux slab which is backed
* by kmalloc'ed memory performs very badly when confronted with
* large numbers of large allocations. Basing the slab on the
* virtual address space removes the need for contiguous pages
* and greatly improves performance for large allocations.
*
* For these reasons, the SPL has its own slab implementation with
* the needed features. It is not as highly optimized as either the
* Solaris or Linux slabs, but it should get me most of what is
* needed until it can be optimized or obsoleted by another approach.
*
* One serious concern I do have about this method is the relatively
* small virtual address space on 32bit arches. This will seriously
* constrain the size of the slab caches and their performance.
*/
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */
static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return (ptr);
}
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
/*
* The Linux direct reclaim path uses this out of band value to
* determine if forward progress is being made. Normally this is
* incremented by kmem_freepages() which is part of the various
* Linux slab implementations. However, since we are using none
* of that infrastructure we are responsible for incrementing it.
*/
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
vfree(ptr);
}
/*
* Required space for each aligned sks.
*/
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
skc->skc_obj_align, uint32_t));
}
/*
* Required space for each aligned object.
*/
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
uint32_t align = skc->skc_obj_align;
return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);
uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);
/*
* Look up the spl_kmem_obj_t for a given object.
*/
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
skc->skc_obj_align, uint32_t));
}
/*
* It's important that we pack the spl_kmem_obj_t structure and the
* actual objects into one large address space to minimize the number
* of calls to the allocator. It is far better to do a few large
* allocations and then subdivide them ourselves. Which allocator
* we use requires balancing a few trade-offs.
*
* For small objects we use kmem_alloc() because as long as you are
* only requesting a small number of pages (ideally just one) it's cheap.
* However, when you start requesting multiple pages with kmem_alloc()
* it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
* which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
* global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
*
* +------------------------+
* | spl_kmem_slab_t --+-+ |
* | skc_obj_size <-+ | |
* | spl_kmem_obj_t | |
* | skc_obj_size <---+ |
* | spl_kmem_obj_t | |
* | ... v |
* +------------------------+
*/
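/*
 * Worked example of the layout above (illustrative; an 8 byte
 * skc_obj_align and a 100 byte skc_obj_size are assumed): spl_obj_size()
 * rounds the object up to 104 bytes and appends an aligned
 * spl_kmem_obj_t, so spl_sko_from_obj() finds the tracking structure at
 * obj + 104 and the next object slot begins a further
 * P2ROUNDUP(sizeof (spl_kmem_obj_t), 8) bytes beyond that.
 */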
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
void *base;
uint32_t obj_size;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
return (NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
sks->sks_objs = skc->skc_slab_objs;
sks->sks_age = jiffies;
sks->sks_cache = skc;
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
obj_size = spl_obj_size(skc);
for (int i = 0; i < sks->sks_objs; i++) {
void *obj = base + spl_sks_size(skc) + (i * obj_size);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
INIT_LIST_HEAD(&sko->sko_list);
list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
return (sks);
}
/*
* Remove a slab from the complete or partial list. This must be called with
* the 'skc->skc_lock' held, but the actual free must be performed
* outside the lock to prevent deadlocking on vmem addresses.
*/
static void
spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
/*
* Update slab/objects counters in the cache, then remove the
* slab from the skc->skc_partial_list. Finally add the slab
* and all its objects into the private work lists where the
* destructors will be called and the memory freed to the system.
*/
skc->skc_obj_total -= sks->sks_objs;
skc->skc_slab_total--;
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
}
/*
* Reclaim empty slabs at the end of the partial list.
*/
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
spl_kmem_slab_t *sks = NULL, *m = NULL;
spl_kmem_obj_t *sko = NULL, *n = NULL;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
/*
* Empty slabs and objects must be moved to a private list so they
* can be safely freed outside the spin lock. All empty slabs are
* at the end of skc->skc_partial_list, therefore once a non-empty
* slab is found we can stop scanning.
*/
spin_lock(&skc->skc_lock);
list_for_each_entry_safe_reverse(sks, m,
&skc->skc_partial_list, sks_list) {
if (sks->sks_ref > 0)
break;
spl_slab_free(sks, &sks_list, &sko_list);
}
spin_unlock(&skc->skc_lock);
/*
* The following two loops ensure all the object destructors are run,
* and the slabs themselves are freed. This is all done outside the
* skc->skc_lock since this allows the destructor to sleep, and
* allows us to perform a conditional reschedule when freeing a
* large number of objects and slabs back to the system.
*/
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
}
list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
struct rb_node *node = root->rb_node;
spl_kmem_emergency_t *ske;
unsigned long address = (unsigned long)obj;
while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
if (address < ske->ske_obj)
node = node->rb_left;
else if (address > ske->ske_obj)
node = node->rb_right;
else
return (ske);
}
return (NULL);
}
static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
unsigned long address = ske->ske_obj;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
}
rb_link_node(&ske->ske_node, parent, new);
rb_insert_color(&ske->ske_node, root);
return (1);
}
/*
* Allocate a single emergency object and track it in a red black tree.
*/
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance: use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
return (-EEXIST);
ske = kmalloc(sizeof (*ske), lflags);
if (ske == NULL)
return (-ENOMEM);
ske->ske_obj = __get_free_pages(lflags, order);
if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
if (likely(empty)) {
skc->skc_obj_total++;
skc->skc_obj_emergency++;
if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
skc->skc_obj_emergency_max = skc->skc_obj_emergency;
}
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
*obj = (void *)ske->ske_obj;
return (0);
}
/*
* Locate the passed object in the red black tree and free it.
*/
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
if (ske) {
rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
skc->skc_obj_emergency--;
skc->skc_obj_total--;
}
spin_unlock(&skc->skc_lock);
if (ske == NULL)
return (-ENOENT);
free_pages(ske->ske_obj, order);
kfree(ske);
return (0);
}
/*
* Release objects from the per-cpu magazine back to their slab. The flush
* argument contains the max number of entries to remove from the magazine.
*/
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
spin_lock(&skc->skc_lock);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
int count = MIN(flush, skm->skm_avail);
for (int i = 0; i < count; i++)
spl_cache_shrink(skc, skm->skm_objs[i]);
skm->skm_avail -= count;
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof (void *) * skm->skm_avail);
spin_unlock(&skc->skc_lock);
}
/*
* Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page.
*/
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
sks_size = spl_sks_size(skc);
obj_size = spl_obj_size(skc);
max_size = (spl_kmem_cache_max_size * 1024 * 1024);
tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
if (tgt_size <= max_size) {
tgt_objs = (tgt_size - sks_size) / obj_size;
} else {
tgt_objs = (max_size - sks_size) / obj_size;
tgt_size = (tgt_objs * obj_size) + sks_size;
}
if (tgt_objs == 0)
return (-ENOSPC);
*objs = tgt_objs;
*size = tgt_size;
return (0);
}
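/*
 * Illustrative arithmetic (not part of this change): for a 16 KiB
 * per-object slot from spl_obj_size() and a target of 8 objects per
 * slab, tgt_size is roughly 128 KiB plus the slab header. That is well
 * below the spl_kmem_cache_max_size cutoff, so the first branch is taken
 * and the slab is sized to hold all 8 objects.
 */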
/*
* Make a guess at a reasonable per-cpu magazine size based on the size of
* each object and the cost of caching N of them in each magazine. Long
* term this should really adapt based on an observed usage heuristic.
*/
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
uint32_t obj_size = spl_obj_size(skc);
int size;
if (spl_kmem_cache_magazine_size > 0)
return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));
/* Per-magazine sizes below assume a 4 KiB page size */
if (obj_size > (PAGE_SIZE * 256))
size = 4; /* Minimum 4 MiB per-magazine */
else if (obj_size > (PAGE_SIZE * 32))
size = 16; /* Minimum 2 MiB per-magazine */
else if (obj_size > (PAGE_SIZE))
size = 64; /* Minimum 256 KiB per-magazine */
else if (obj_size > (PAGE_SIZE / 4))
size = 128; /* Minimum 128 KiB per-magazine */
else
size = 256;
return (size);
}
/*
* Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
spl_kmem_magazine_t *skm;
int size = sizeof (spl_kmem_magazine_t) +
sizeof (void *) * skc->skc_mag_size;
skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
skm->skm_cpu = cpu;
}
return (skm);
}
/*
* Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kfree(skm);
}
/*
* Create all per-cpu magazines of reasonable sizes.
*/
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for_each_possible_cpu(i) {
skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
kfree(skc->skc_mag);
return (-ENOMEM);
}
}
return (0);
}
/*
* Destroy all per-cpu magazines.
*/
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
spl_kmem_magazine_t *skm;
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
kfree(skc->skc_mag);
}
/*
* Create an object cache based on the following arguments:
* name cache name
* size cache object size
* align cache object alignment
* ctor cache object constructor
* dtor cache object destructor
* reclaim cache object reclaim
* priv cache private data for ctor/dtor/reclaim
* vmp unused must be NULL
* flags
* KMC_KVMEM Force kvmem backed SPL cache
* KMC_SLAB Force Linux slab backed cache
* KMC_NODEBUG Disable debugging (unsupported)
*/
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
void *priv, void *vmp, int flags)
{
gfp_t lflags = kmem_flags_convert(KM_SLEEP);
spl_kmem_cache_t *skc;
int rc;
/*
* Unsupported flags
*/
ASSERT(vmp == NULL);
ASSERT(reclaim == NULL);
might_sleep();
skc = kzalloc(sizeof (*skc), lflags);
if (skc == NULL)
return (NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags);
if (skc->skc_name == NULL) {
kfree(skc);
return (NULL);
}
strncpy(skc->skc_name, name, skc->skc_name_size);
skc->skc_ctor = ctor;
skc->skc_dtor = dtor;
skc->skc_private = priv;
skc->skc_vmp = vmp;
skc->skc_linux_cache = NULL;
skc->skc_flags = flags;
skc->skc_obj_size = size;
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
atomic_set(&skc->skc_ref, 0);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
skc->skc_slab_create = 0;
skc->skc_slab_destroy = 0;
skc->skc_slab_total = 0;
skc->skc_slab_alloc = 0;
skc->skc_slab_max = 0;
skc->skc_obj_total = 0;
skc->skc_obj_alloc = 0;
skc->skc_obj_max = 0;
skc->skc_obj_deadlock = 0;
skc->skc_obj_emergency = 0;
skc->skc_obj_emergency_max = 0;
rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
GFP_KERNEL);
if (rc != 0) {
kfree(skc);
return (NULL);
}
/*
* Verify the requested alignment restriction is sane.
*/
if (align) {
VERIFY(ISP2(align));
VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
VERIFY3U(align, <=, PAGE_SIZE);
skc->skc_obj_align = align;
}
/*
* When no specific type of slab is requested (kmem, vmem, or
* linuxslab) then select a cache type based on the object size
* and default tunables.
*/
if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
if (spl_kmem_cache_slab_limit &&
size <= (size_t)spl_kmem_cache_slab_limit) {
/*
* Objects smaller than spl_kmem_cache_slab_limit can
* use the Linux slab for better space-efficiency.
*/
skc->skc_flags |= KMC_SLAB;
} else {
/*
* All other objects are considered large and are
* placed on kvmem backed slabs.
*/
skc->skc_flags |= KMC_KVMEM;
}
}
/*
* Given the type of slab allocate the required resources.
*/
if (skc->skc_flags & KMC_KVMEM) {
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
goto out;
rc = spl_magazine_create(skc);
if (rc)
goto out;
} else {
unsigned long slabflags = 0;
if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
rc = EINVAL;
goto out;
}
#if defined(SLAB_USERCOPY)
/*
* Required for PAX-enabled kernels if the slab is to be
* used for copying between user and kernel space.
*/
slabflags |= SLAB_USERCOPY;
#endif
#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
/*
* Newer grsec patchset uses kmem_cache_create_usercopy()
* instead of SLAB_USERCOPY flag
*/
skc->skc_linux_cache = kmem_cache_create_usercopy(
skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
skc->skc_linux_cache = kmem_cache_create(
skc->skc_name, size, align, slabflags, NULL);
#endif
if (skc->skc_linux_cache == NULL) {
rc = ENOMEM;
goto out;
}
}
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
return (skc);
out:
kfree(skc->skc_name);
percpu_counter_destroy(&skc->skc_linux_alloc);
kfree(skc);
return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
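/*
 * Minimal usage sketch (illustrative; my_obj_t, my_ctor and my_dtor are
 * hypothetical caller-supplied names):
 *
 *	spl_kmem_cache_t *cp = spl_kmem_cache_create("my_cache",
 *	    sizeof (my_obj_t), 0, my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *	void *obj = spl_kmem_cache_alloc(cp, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cp, obj);
 *	spl_kmem_cache_destroy(cp);
 */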
/*
* Register a move callback for cache defragmentation.
* XXX: Unimplemented but harmless to stub out for now.
*/
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
* Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
taskqid_t id;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
down_write(&spl_kmem_cache_sem);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
/* Cancel and wait for any pending delayed tasks */
VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
spin_lock(&skc->skc_lock);
id = skc->skc_taskqid;
spin_unlock(&skc->skc_lock);
taskq_cancel_id(spl_kmem_cache_taskq, id);
/*
* Wait until all current callers complete; this is mainly
* to catch the case where a low memory situation triggers a
* cache reaping action which races with this destroy.
*/
wait_event(wq, atomic_read(&skc->skc_ref) == 0);
if (skc->skc_flags & KMC_KVMEM) {
spl_magazine_destroy(skc);
spl_slab_reclaim(skc);
} else {
ASSERT(skc->skc_flags & KMC_SLAB);
kmem_cache_destroy(skc->skc_linux_cache);
}
spin_lock(&skc->skc_lock);
/*
* Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/
ASSERT3U(skc->skc_slab_alloc, ==, 0);
ASSERT3U(skc->skc_obj_alloc, ==, 0);
ASSERT3U(skc->skc_slab_total, ==, 0);
ASSERT3U(skc->skc_obj_total, ==, 0);
ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
percpu_counter_destroy(&skc->skc_linux_alloc);
spin_unlock(&skc->skc_lock);
kfree(skc->skc_name);
kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
* Allocate an object from a slab attached to the cache. This is used to
* repopulate the per-cpu magazine caches in batches when they run low.
*/
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
spl_kmem_obj_t *sko;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(sks->sks_magic == SKS_MAGIC);
sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
ASSERT(sko->sko_magic == SKO_MAGIC);
ASSERT(sko->sko_addr != NULL);
/* Remove from sks_free_list */
list_del_init(&sko->sko_list);
sks->sks_age = jiffies;
sks->sks_ref++;
skc->skc_obj_alloc++;
/* Track max obj usage statistics */
if (skc->skc_obj_alloc > skc->skc_obj_max)
skc->skc_obj_max = skc->skc_obj_alloc;
/* Track max slab usage statistics */
if (sks->sks_ref == 1) {
skc->skc_slab_alloc++;
if (skc->skc_slab_alloc > skc->skc_slab_max)
skc->skc_slab_max = skc->skc_slab_alloc;
}
return (sko->sko_addr);
}
/*
* Generic slab allocation function to be run by the global work queues.
* It is responsible for allocating a new slab, linking it into the list
* of partial slabs, and then waking any waiters.
*/
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
fstrans_cookie_t cookie = spl_fstrans_mark();
sks = spl_slab_alloc(skc, flags);
spl_fstrans_unmark(cookie);
spin_lock(&skc->skc_lock);
if (sks) {
skc->skc_slab_total++;
skc->skc_obj_total += sks->sks_objs;
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
smp_mb__before_atomic();
clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
smp_mb__after_atomic();
}
spin_unlock(&skc->skc_lock);
return (sks == NULL ? -ENOMEM : 0);
}
static void
spl_cache_grow_work(void *data)
{
spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
spl_kmem_cache_t *skc = ska->ska_cache;
int error = __spl_cache_grow(skc, ska->ska_flags);
atomic_dec(&skc->skc_ref);
smp_mb__before_atomic();
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
if (error == 0)
wake_up_all(&skc->skc_waitq);
kfree(ska);
}
/*
* Returns non-zero when a new slab should be available.
*/
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}
/*
* No available objects on any slab, so create a new slab. Note that this
* functionality is disabled for KMC_SLAB caches which are backed by the
* Linux slab.
*/
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
int remaining, rc = 0;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
might_sleep();
*obj = NULL;
/*
* Before allocating a new slab wait for any reaping to complete and
* then return so the local magazine can be rechecked for new objects.
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
TASK_UNINTERRUPTIBLE);
return (rc ? rc : -EAGAIN);
}
/*
* Note: It would be nice to reduce the overhead of a context switch
* and improve NUMA locality by trying to allocate a new slab in the
* current process context with the KM_NOSLEEP flag.
*
* However, this cannot be applied to vmem/kvmem because of a bug where
* spl_vmalloc() does not honor the gfp flags during page table allocation.
*/
/*
* This is handled by dispatching a work request to the global work
* queue. This allows us to asynchronously allocate a new slab while
* retaining the ability to safely fall back to smaller synchronous
* allocations to ensure forward progress is always maintained.
*/
if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
spl_kmem_alloc_t *ska;
ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
if (ska == NULL) {
clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_all(&skc->skc_waitq);
return (-ENOMEM);
}
atomic_inc(&skc->skc_ref);
ska->ska_cache = skc;
ska->ska_flags = flags;
taskq_init_ent(&ska->ska_tqe);
taskq_dispatch_ent(spl_kmem_cache_taskq,
spl_cache_grow_work, ska, 0, &ska->ska_tqe);
}
/*
* The goal here is to only detect the rare case where a virtual slab
* allocation has deadlocked. We must be careful to minimize the use
* of emergency objects which are more expensive to track. Therefore,
* we set a very long timeout for the asynchronous allocation and if
* the timeout is reached the cache is flagged as deadlocked. From
* this point only new emergency objects will be allocated until the
* asynchronous allocation completes and clears the deadlocked flag.
*/
if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
rc = spl_emergency_alloc(skc, flags, obj);
} else {
remaining = wait_event_timeout(skc->skc_waitq,
spl_cache_grow_wait(skc), HZ / 10);
if (!remaining) {
spin_lock(&skc->skc_lock);
if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
skc->skc_obj_deadlock++;
}
spin_unlock(&skc->skc_lock);
}
rc = -ENOMEM;
}
return (rc);
}
/*
* Refill a per-cpu magazine with objects from the slabs for this cache.
* Ideally the magazine can be repopulated using existing objects which have
* been released; however, if we are unable to locate enough free objects, new
* slabs of objects will be created. On success NULL is returned, otherwise
* the address of a single emergency object is returned for use by the caller.
*/
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
int count = 0, rc, refill;
void *obj = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
while (refill > 0) {
/* No slabs available we may need to grow the cache */
if (list_empty(&skc->skc_partial_list)) {
spin_unlock(&skc->skc_lock);
local_irq_enable();
rc = spl_cache_grow(skc, flags, &obj);
local_irq_disable();
/* Emergency object for immediate use by caller */
if (rc == 0 && obj != NULL)
return (obj);
if (rc)
goto out;
/* Rescheduled to a different CPU; skm is no longer local */
if (skm != skc->skc_mag[smp_processor_id()])
goto out;
/*
* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill.
*/
refill = MIN(refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
continue;
}
/* Grab the next available slab */
sks = list_entry((&skc->skc_partial_list)->next,
spl_kmem_slab_t, sks_list);
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref < sks->sks_objs);
ASSERT(!list_empty(&sks->sks_free_list));
/*
* Consume as many objects as needed to refill the requested
* cache. We must also be careful not to overfill it.
*/
while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
++count) {
ASSERT(skm->skm_avail < skm->skm_size);
ASSERT(count < skm->skm_size);
skm->skm_objs[skm->skm_avail++] =
spl_cache_obj(skc, sks);
}
/* Move slab to skc_complete_list when full */
if (sks->sks_ref == sks->sks_objs) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_complete_list);
}
}
spin_unlock(&skc->skc_lock);
out:
return (NULL);
}
/*
* Release an object back to the slab from which it came.
*/
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
sks = sko->sko_slab;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_cache == skc);
list_add(&sko->sko_list, &sks->sks_free_list);
sks->sks_age = jiffies;
sks->sks_ref--;
skc->skc_obj_alloc--;
/*
* Move slab to skc_partial_list when no longer full. Slabs
* are added to the head to keep the partial list in quasi-full
* sorted order. Fuller at the head, emptier at the tail.
*/
if (sks->sks_ref == (sks->sks_objs - 1)) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_partial_list);
}
/*
* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation.
*/
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
skc->skc_slab_alloc--;
}
}
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
*/
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_magazine_t *skm;
void *obj = NULL;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Allocate directly from a Linux slab. All optimizations are left
* to the underlying cache; we only need to guarantee that KM_SLEEP
* callers will never fail.
*/
if (skc->skc_flags & KMC_SLAB) {
struct kmem_cache *slc = skc->skc_linux_cache;
do {
obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
} while ((obj == NULL) && !(flags & KM_NOSLEEP));
if (obj != NULL) {
/*
* Even though we leave everything up to the
* underlying cache we still keep track of
* how many objects we've allocated in it for
* better debuggability.
*/
percpu_counter_inc(&skc->skc_linux_alloc);
}
goto ret;
}
local_irq_disable();
restart:
/*
* Safe to update per-cpu structure without lock, but
* in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
if (likely(skm->skm_avail)) {
/* Object available in CPU cache, use it */
obj = skm->skm_objs[--skm->skm_avail];
} else {
obj = spl_cache_refill(skc, skm, flags);
if ((obj == NULL) && !(flags & KM_NOSLEEP))
goto restart;
local_irq_enable();
goto ret;
}
local_irq_enable();
ASSERT(obj);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
ret:
/* Pre-emptively migrate object to CPU L1 cache */
if (obj) {
if (obj && skc->skc_ctor)
skc->skc_ctor(obj, skc->skc_private, flags);
else
prefetchw(obj);
}
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
* Free an object back to the local per-cpu magazine; there is no
* guarantee that this is the same magazine the object was originally
* allocated from. We may need to flush entries from the magazine
* back to the slabs to make space.
*/
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_magazine_t *skm;
unsigned long flags;
int do_reclaim = 0;
int do_emergency = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Run the destructor
*/
if (skc->skc_dtor)
skc->skc_dtor(obj, skc->skc_private);
/*
* Free the object back to the underlying Linux slab.
*/
if (skc->skc_flags & KMC_SLAB) {
kmem_cache_free(skc->skc_linux_cache, obj);
percpu_counter_dec(&skc->skc_linux_alloc);
return;
}
/*
* While a cache has outstanding emergency objects all freed objects
* must be checked. However, since emergency objects will never use
* a virtual address these objects can be safely excluded as an
* optimization.
*/
if (!is_vmalloc_addr(obj)) {
spin_lock(&skc->skc_lock);
do_emergency = (skc->skc_obj_emergency > 0);
spin_unlock(&skc->skc_lock);
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
return;
}
local_irq_save(flags);
/*
* Safe to update the per-cpu structure without a lock, but since
* no remote memory allocation tracking is being performed
* it is entirely possible to allocate an object from one
* CPU cache and return it to another.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
/*
* Per-CPU cache full; flush it to make space for this object.
* This may result in an empty slab which can be reclaimed once
* interrupts are re-enabled.
*/
if (unlikely(skm->skm_avail >= skm->skm_size)) {
spl_cache_flush(skc, skm, skm->skm_refill);
do_reclaim = 1;
}
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
local_irq_restore(flags);
if (do_reclaim)
spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
* Depending on how many and which objects are released it may simply
* repopulate the local magazine which will then need to age-out. Objects
* which cannot fit in the magazine will be released back to their slabs
* which will also need to age out before being released. This is all just
* best effort and we do not want to thrash creating and destroying slabs.
*/
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
if (skc->skc_flags & KMC_SLAB)
return;
atomic_inc(&skc->skc_ref);
/*
* Prevent concurrent cache reaping when contended.
*/
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
goto out;
/* Reclaim from the magazine and free all now empty slabs. */
unsigned long irq_flags;
local_irq_save(irq_flags);
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
spl_cache_flush(skc, skm, skm->skm_avail);
local_irq_restore(irq_flags);
spl_slab_reclaim(skc);
clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
* This is stubbed out for code consistency with other platforms. There
* is existing logic to prevent concurrent reaping so while this is ugly
* it should do no harm.
*/
int
spl_kmem_cache_reap_active(void)
{
return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);
/*
* Reap all free slabs from all registered caches.
*/
void
spl_kmem_reap(void)
{
spl_kmem_cache_t *skc = NULL;
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
spl_kmem_cache_reap_now(skc);
}
up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_cache_init(void)
{
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
spl_kmem_cache_kmem_threads, maxclsyspri,
spl_kmem_cache_kmem_threads * 8, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
+ if (spl_kmem_cache_taskq == NULL)
+ return (-ENOMEM);
+
return (0);
}
void
spl_kmem_cache_fini(void)
{
taskq_destroy(spl_kmem_cache_taskq);
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c b/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
index 0aab148975aa..3b0c29606c2e 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
@@ -1,1433 +1,1433 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Task Queue Implementation.
*/
#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif
static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
"Allow non-default priority for taskq threads");
static int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
"Create new taskq threads after N sequential tasks");
/*
* Global system-wide dynamic task queue available for all consumers. This
* taskq is not intended for long-running tasks; instead, a dedicated taskq
* should be created.
*/
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);
/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);
#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif
/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;
static int
task_km_flags(uint_t flags)
{
if (flags & TQ_NOSLEEP)
return (KM_NOSLEEP);
if (flags & TQ_PUSHPAGE)
return (KM_PUSHPAGE);
return (KM_SLEEP);
}
/*
* taskq_find_by_name - Find the largest instance number of a named taskq.
*/
static int
taskq_find_by_name(const char *name)
{
struct list_head *tql = NULL;
taskq_t *tq;
list_for_each_prev(tql, &tq_list) {
tq = list_entry(tql, taskq_t, tq_taskqs);
if (strcmp(name, tq->tq_name) == 0)
return (tq->tq_instance);
}
return (-1);
}
/*
* NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
* is not attached to the free, work, or pending taskq lists.
*/
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
taskq_ent_t *t;
int count = 0;
ASSERT(tq);
retry:
/* Acquire taskq_ent_t's from free list if available */
if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
return (t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
return (NULL);
/* Hit maximum taskq_ent_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
return (NULL);
/*
* Sleep periodically polling the free list for an available
* taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
* but we cannot block forever waiting for a taskq_ent_t to
* show up in the free list, otherwise a deadlock can happen.
*
* Therefore, we need to allocate a new task even if the number
* of allocated tasks is above tq->tq_maxalloc, but we still
* end up delaying the task allocation by one second, thereby
* throttling the task dispatch rate.
*/
spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
schedule_timeout(HZ / 100);
spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
tq->tq_lock_class);
if (count < 100) {
count++;
goto retry;
}
}
spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
if (t) {
taskq_init_ent(t);
tq->tq_nalloc++;
}
return (t);
}
/*
* NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
* to already be removed from the free, work, or pending taskq lists.
*/
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
ASSERT(list_empty(&t->tqent_list));
ASSERT(!timer_pending(&t->tqent_timer));
kmem_free(t, sizeof (taskq_ent_t));
tq->tq_nalloc--;
}
/*
* NOTE: Must be called with tq->tq_lock held, either destroys the
* taskq_ent_t if too many exist or moves it to the free list for later use.
*/
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
/* Wake tasks blocked in taskq_wait_id() */
wake_up_all(&t->tqent_waitq);
list_del_init(&t->tqent_list);
if (tq->tq_nalloc <= tq->tq_minalloc) {
t->tqent_id = TASKQID_INVALID;
t->tqent_func = NULL;
t->tqent_arg = NULL;
t->tqent_flags = 0;
list_add_tail(&t->tqent_list, &tq->tq_free_list);
} else {
task_free(tq, t);
}
}
/*
* When a delayed task timer expires remove it from the delay list and
* add it to the priority list, in task id order, for immediate processing.
*/
static void
task_expire_impl(taskq_ent_t *t)
{
taskq_ent_t *w;
taskq_t *tq = t->tqent_taskq;
struct list_head *l = NULL;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
if (t->tqent_flags & TQENT_FLAG_CANCEL) {
ASSERT(list_empty(&t->tqent_list));
spin_unlock_irqrestore(&tq->tq_lock, flags);
return;
}
t->tqent_birth = jiffies;
DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);
/*
* The priority list must be maintained in strict task id order
* from lowest to highest for lowest_id to be easily calculable.
*/
list_del(&t->tqent_list);
list_for_each_prev(l, &tq->tq_prio_list) {
w = list_entry(l, taskq_ent_t, tqent_list);
if (w->tqent_id < t->tqent_id) {
list_add(&t->tqent_list, l);
break;
}
}
if (l == &tq->tq_prio_list)
list_add(&t->tqent_list, &tq->tq_prio_list);
spin_unlock_irqrestore(&tq->tq_lock, flags);
wake_up(&tq->tq_work_waitq);
}
static void
task_expire(spl_timer_list_t tl)
{
struct timer_list *tmr = (struct timer_list *)tl;
taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
task_expire_impl(t);
}
/*
* Returns the lowest incomplete taskqid_t. The taskqid_t may
* be queued on the pending list, on the priority list, on the
* delay list, or on the work list currently being handled, but
* it is not 100% complete yet.
*/
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
taskqid_t lowest_id = tq->tq_next_id;
taskq_ent_t *t;
taskq_thread_t *tqt;
if (!list_empty(&tq->tq_pend_list)) {
t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
lowest_id = MIN(lowest_id, t->tqent_id);
}
if (!list_empty(&tq->tq_prio_list)) {
t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
lowest_id = MIN(lowest_id, t->tqent_id);
}
if (!list_empty(&tq->tq_delay_list)) {
t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
lowest_id = MIN(lowest_id, t->tqent_id);
}
if (!list_empty(&tq->tq_active_list)) {
tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
tqt_active_list);
ASSERT(tqt->tqt_id != TASKQID_INVALID);
lowest_id = MIN(lowest_id, tqt->tqt_id);
}
return (lowest_id);
}
/*
* Insert a task into a list keeping the list sorted by increasing taskqid.
*/
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
taskq_thread_t *w;
struct list_head *l = NULL;
ASSERT(tq);
ASSERT(tqt);
list_for_each_prev(l, &tq->tq_active_list) {
w = list_entry(l, taskq_thread_t, tqt_active_list);
if (w->tqt_id < tqt->tqt_id) {
list_add(&tqt->tqt_active_list, l);
break;
}
}
if (l == &tq->tq_active_list)
list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}
/*
* Find and return a task from the given list if it exists. The list
* must be in lowest to highest task id order.
*/
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
struct list_head *l = NULL;
taskq_ent_t *t;
list_for_each(l, lh) {
t = list_entry(l, taskq_ent_t, tqent_list);
if (t->tqent_id == id)
return (t);
if (t->tqent_id > id)
break;
}
return (NULL);
}
/*
* Find an already dispatched task given the task id regardless of what
* state it is in. If a task is still pending it will be returned.
* If a task is executing, then -EBUSY will be returned instead.
* If the task has already been run then NULL is returned.
*/
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
taskq_thread_t *tqt;
struct list_head *l = NULL;
taskq_ent_t *t;
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
return (t);
t = taskq_find_list(tq, &tq->tq_prio_list, id);
if (t)
return (t);
t = taskq_find_list(tq, &tq->tq_pend_list, id);
if (t)
return (t);
list_for_each(l, &tq->tq_active_list) {
tqt = list_entry(l, taskq_thread_t, tqt_active_list);
if (tqt->tqt_id == id) {
/*
* Instead of returning tqt_task, we just return a non
* NULL value to prevent misuse, since tqt_task only
* has two valid fields.
*/
return (ERR_PTR(-EBUSY));
}
}
return (NULL);
}
/*
* Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
* taskq_wait() functions below.
*
* Taskq waiting is accomplished by tracking the lowest outstanding task
* id and the next available task id. As tasks are dispatched they are
* added to the tail of the pending, priority, or delay lists. As worker
* threads become available the tasks are removed from the heads of these
* lists and linked to the worker threads. This ensures the lists are
* kept sorted by lowest to highest task id.
*
* Therefore the lowest outstanding task id can be quickly determined by
* checking the head item from all of these lists. This value is stored
* with the taskq as the lowest id. It only needs to be recalculated when
* either the task with the current lowest id completes or is canceled.
*
* By blocking until the lowest task id exceeds the passed task id the
* taskq_wait_outstanding() function can be easily implemented. Similarly,
* by blocking until the lowest task id matches the next task id taskq_wait()
* can be implemented.
*
* Callers should be aware that when there are multiple worker threads it
* is possible for larger task ids to complete before smaller ones. Also
* when the taskq contains delay tasks with small task ids callers may
* block for a considerable length of time waiting for them to expire and
* execute.
*/
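/*
 * Illustrative sketch (not part of this change; my_func and my_arg are
 * hypothetical): dispatch hands out monotonically increasing ids which
 * the wait functions below consume.
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	taskq_wait_id(tq, id);            waits for this task only
 *	taskq_wait_outstanding(tq, id);   waits for ids lower than 'id'
 *	taskq_wait(tq);                   waits until the taskq is empty
 */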
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
int rc;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
rc = (taskq_find(tq, id) == NULL);
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (rc);
}
/*
* The taskq_wait_id() function blocks until the passed task id completes.
* This does not guarantee that all lower task ids have completed.
*/
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);
static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
int rc;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (rc);
}
/*
* The taskq_wait_outstanding() function will block until all tasks with a
* lower taskqid than the passed 'id' have been completed. Note that all
* task ids are assigned monotonically at dispatch time. Zero may be
* passed for the id to indicate all tasks dispatched up to this point,
* but not after, should be waited for.
*/
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
id = id ? id : tq->tq_next_id - 1;
wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);
static int
taskq_wait_check(taskq_t *tq)
{
int rc;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
rc = (tq->tq_lowest_id == tq->tq_next_id);
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (rc);
}
/*
* The taskq_wait() function will block until the taskq is empty.
* This means that if a taskq re-dispatches work to itself taskq_wait()
* callers will block indefinitely.
*/
void
taskq_wait(taskq_t *tq)
{
wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
int
taskq_member(taskq_t *tq, kthread_t *t)
{
return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);
taskq_t *
taskq_of_curthread(void)
{
return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);
/*
* Cancel an already dispatched task given the task id. Still pending tasks
* will be immediately canceled, and if the task is active the function will
* block until it completes. Preallocated tasks which are canceled must be
* freed by the caller.
*/
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
taskq_ent_t *t;
int rc = ENOENT;
unsigned long flags;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
t = taskq_find(tq, id);
if (t && t != ERR_PTR(-EBUSY)) {
list_del_init(&t->tqent_list);
t->tqent_flags |= TQENT_FLAG_CANCEL;
/*
* When canceling the lowest outstanding task id we
* must recalculate the new lowest outstanding id.
*/
if (tq->tq_lowest_id == t->tqent_id) {
tq->tq_lowest_id = taskq_lowest_id(tq);
ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
}
/*
* The task_expire() function takes the tq->tq_lock, so drop
* the lock before synchronously cancelling the timer.
*/
if (timer_pending(&t->tqent_timer)) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
del_timer_sync(&t->tqent_timer);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
}
if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
task_done(tq, t);
rc = 0;
}
spin_unlock_irqrestore(&tq->tq_lock, flags);
if (t == ERR_PTR(-EBUSY)) {
taskq_wait_id(tq, id);
rc = EBUSY;
}
return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
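/*
 * Illustrative sketch (not part of this change): the return value of
 * taskq_cancel_id() encodes what happened to the task,
 *
 *	int rc = taskq_cancel_id(tq, id);
 *	rc == 0       the still-pending task was removed before running
 *	rc == ENOENT  the task had already completed (or the id is unknown)
 *	rc == EBUSY   the task was executing; the call waited for it to finish
 */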
static int taskq_thread_spawn(taskq_t *tq);
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
taskq_ent_t *t;
taskqid_t rc = TASKQID_INVALID;
unsigned long irqflags;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
/* Do not queue the task unless there is an idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
/* Dynamic taskq may be able to spawn another thread */
if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
taskq_thread_spawn(tq) == 0)
goto out;
}
if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
goto out;
spin_lock(&t->tqent_lock);
/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
if (flags & TQ_NOQUEUE)
list_add(&t->tqent_list, &tq->tq_prio_list);
/* Queue to the priority list instead of the pending list */
else if (flags & TQ_FRONT)
list_add_tail(&t->tqent_list, &tq->tq_prio_list);
else
list_add_tail(&t->tqent_list, &tq->tq_pend_list);
t->tqent_id = rc = tq->tq_next_id;
tq->tq_next_id++;
t->tqent_func = func;
t->tqent_arg = arg;
t->tqent_taskq = tq;
t->tqent_timer.function = NULL;
t->tqent_timer.expires = 0;
t->tqent_birth = jiffies;
DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
spin_unlock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
/* Spawn additional taskq threads if required. */
if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
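/*
* Usage sketch (illustrative only; not part of the original source). A
* hypothetical callback my_cb(void *arg) is dispatched and then waited on:
*
*	taskqid_t id = taskq_dispatch(tq, my_cb, my_arg, TQ_SLEEP);
*	if (id == TASKQID_INVALID)
*		return (ENOMEM);	/- allocation failed or taskq inactive
*	taskq_wait_id(tq, id);
*/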
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
uint_t flags, clock_t expire_time)
{
taskqid_t rc = TASKQID_INVALID;
taskq_ent_t *t;
unsigned long irqflags;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
goto out;
spin_lock(&t->tqent_lock);
/* Queue to the delay list for subsequent execution */
list_add_tail(&t->tqent_list, &tq->tq_delay_list);
t->tqent_id = rc = tq->tq_next_id;
tq->tq_next_id++;
t->tqent_func = func;
t->tqent_arg = arg;
t->tqent_taskq = tq;
t->tqent_timer.function = task_expire;
t->tqent_timer.expires = (unsigned long)expire_time;
add_timer(&t->tqent_timer);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
spin_unlock(&t->tqent_lock);
out:
/* Spawn additional taskq threads if required. */
if (tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
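/*
* Usage sketch (illustrative only; not part of the original source). Note
* that expire_time is an absolute time, not a relative delay, so a caller
* typically adds an offset to the current lbolt value. my_cb, my_arg, and
* delay_ticks are hypothetical names.
*
*	taskqid_t id = taskq_dispatch_delay(tq, my_cb, my_arg, TQ_SLEEP,
*	    ddi_get_lbolt() + delay_ticks);
*/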
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
taskq_ent_t *t)
{
unsigned long irqflags;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE)) {
t->tqent_id = TASKQID_INVALID;
goto out;
}
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
/* Dynamic taskq may be able to spawn another thread */
if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
taskq_thread_spawn(tq) == 0)
goto out2;
flags |= TQ_FRONT;
}
spin_lock(&t->tqent_lock);
/*
* Make sure the entry is not on some other taskq; it is important to
* ASSERT() under lock
*/
ASSERT(taskq_empty_ent(t));
/*
* Mark it as a prealloc'd task. This is important
* to ensure that we don't free it later.
*/
t->tqent_flags |= TQENT_FLAG_PREALLOC;
/* Queue to the priority list instead of the pending list */
if (flags & TQ_FRONT)
list_add_tail(&t->tqent_list, &tq->tq_prio_list);
else
list_add_tail(&t->tqent_list, &tq->tq_pend_list);
t->tqent_id = tq->tq_next_id;
tq->tq_next_id++;
t->tqent_func = func;
t->tqent_arg = arg;
t->tqent_taskq = tq;
t->tqent_birth = jiffies;
DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);
spin_unlock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
/* Spawn additional taskq threads if required. */
if (tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
out2:
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
int
taskq_empty_ent(taskq_ent_t *t)
{
return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);
void
taskq_init_ent(taskq_ent_t *t)
{
spin_lock_init(&t->tqent_lock);
init_waitqueue_head(&t->tqent_waitq);
timer_setup(&t->tqent_timer, NULL, 0);
INIT_LIST_HEAD(&t->tqent_list);
t->tqent_id = 0;
t->tqent_func = NULL;
t->tqent_arg = NULL;
t->tqent_flags = 0;
t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
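/*
* Usage sketch for the preallocated-entry interface (illustrative only; not
* part of the original source). A taskq_ent_t embedded in a hypothetical
* caller structure is initialized once and then reused for dispatch:
*
*	struct my_obj {
*		taskq_ent_t	mo_tqent;
*	};
*
*	taskq_init_ent(&obj->mo_tqent);
*	...
*	if (taskq_empty_ent(&obj->mo_tqent))
*		taskq_dispatch_ent(tq, my_cb, obj, 0, &obj->mo_tqent);
*/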
/*
* Return the next pending task; preference is given to tasks on the
* priority list which were dispatched with TQ_FRONT.
*/
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
struct list_head *list;
if (!list_empty(&tq->tq_prio_list))
list = &tq->tq_prio_list;
else if (!list_empty(&tq->tq_pend_list))
list = &tq->tq_pend_list;
else
return (NULL);
return (list_entry(list->next, taskq_ent_t, tqent_list));
}
/*
* Spawns a new thread for the specified taskq.
*/
static void
taskq_thread_spawn_task(void *arg)
{
taskq_t *tq = (taskq_t *)arg;
unsigned long flags;
if (taskq_thread_create(tq) == NULL) {
/* restore spawning count if failed */
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
tq->tq_nspawn--;
spin_unlock_irqrestore(&tq->tq_lock, flags);
}
}
/*
* Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
* current number of threads is insufficient to handle the pending tasks. These
* new threads must be created by the dedicated dynamic_taskq to avoid
* deadlocks between thread creation and memory reclaim. The system_taskq
* which is also a dynamic taskq cannot be safely used for this.
*/
static int
taskq_thread_spawn(taskq_t *tq)
{
int spawning = 0;
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);
if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
(tq->tq_flags & TASKQ_ACTIVE)) {
spawning = (++tq->tq_nspawn);
taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
tq, TQ_NOSLEEP);
}
return (spawning);
}
/*
* Threads in a dynamic taskq should only exit once it has been completely
* drained and no other threads are actively servicing tasks. This prevents
* threads from being created and destroyed more than is required.
*
* The first thread in the thread list is treated as the primary thread.
* There is nothing special about the primary thread but in order to avoid
* all the taskq pids from changing we opt to make it long running.
*/
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);
if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
tqt_thread_list) == tqt)
return (0);
return
((tq->tq_nspawn == 0) && /* No threads are being spawned */
(tq->tq_nactive == 0) && /* No threads are handling tasks */
(tq->tq_nthreads > 1) && /* More than 1 thread is running */
(!taskq_next_ent(tq)) && /* There are no pending tasks */
(spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
}
static int
taskq_thread(void *args)
{
DECLARE_WAITQUEUE(wait, current);
sigset_t blocked;
taskq_thread_t *tqt = args;
taskq_t *tq;
taskq_ent_t *t;
int seq_tasks = 0;
unsigned long flags;
taskq_ent_t dup_task = {};
ASSERT(tqt);
ASSERT(tqt->tqt_tq);
tq = tqt->tqt_tq;
current->flags |= PF_NOFREEZE;
(void) spl_fstrans_mark();
sigfillset(&blocked);
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);
tsd_set(taskq_tsd, tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
/*
* If we are dynamically spawned, decrease spawning count. Note that
* we could be created during taskq_create, in which case we shouldn't
* do the decrement. But it's fine because taskq_create will reset
* tq_nspawn later.
*/
if (tq->tq_flags & TASKQ_DYNAMIC)
tq->tq_nspawn--;
/* Immediately exit if more threads than allowed were created. */
if (tq->tq_nthreads >= tq->tq_maxthreads)
goto error;
tq->tq_nthreads++;
list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
wake_up(&tq->tq_wait_waitq);
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
if (list_empty(&tq->tq_pend_list) &&
list_empty(&tq->tq_prio_list)) {
if (taskq_thread_should_stop(tq, tqt)) {
wake_up_all(&tq->tq_wait_waitq);
break;
}
add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
spin_unlock_irqrestore(&tq->tq_lock, flags);
schedule();
seq_tasks = 0;
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
remove_wait_queue(&tq->tq_work_waitq, &wait);
} else {
__set_current_state(TASK_RUNNING);
}
if ((t = taskq_next_ent(tq)) != NULL) {
list_del_init(&t->tqent_list);
/*
* A TQENT_FLAG_PREALLOC task may be reused or freed
* during the task function call. Store tqent_id and
* tqent_flags here.
*
* Also use an on stack taskq_ent_t for tqt_task
* assignment in this case; we want to make sure
* to duplicate all fields, so the values are
* correct when it's accessed via DTRACE_PROBE*.
*/
tqt->tqt_id = t->tqent_id;
tqt->tqt_flags = t->tqent_flags;
if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
dup_task = *t;
t = &dup_task;
}
tqt->tqt_task = t;
taskq_insert_in_order(tq, tqt);
tq->tq_nactive++;
spin_unlock_irqrestore(&tq->tq_lock, flags);
DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);
/* Perform the requested task */
t->tqent_func(t->tqent_arg);
DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
tqt->tqt_task = NULL;
/* For prealloc'd tasks, we don't free anything. */
if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
task_done(tq, t);
/*
* When the current lowest outstanding taskqid is
* done, calculate the new lowest outstanding id.
*/
if (tq->tq_lowest_id == tqt->tqt_id) {
tq->tq_lowest_id = taskq_lowest_id(tq);
ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
}
/* Spawn additional taskq threads if required. */
if ((++seq_tasks) > spl_taskq_thread_sequential &&
taskq_thread_spawn(tq))
seq_tasks = 0;
tqt->tqt_id = TASKQID_INVALID;
tqt->tqt_flags = 0;
wake_up_all(&tq->tq_wait_waitq);
} else {
if (taskq_thread_should_stop(tq, tqt))
break;
}
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
tq->tq_nthreads--;
list_del_init(&tqt->tqt_thread_list);
error:
kmem_free(tqt, sizeof (taskq_thread_t));
spin_unlock_irqrestore(&tq->tq_lock, flags);
tsd_set(taskq_tsd, NULL);
thread_exit();
return (0);
}
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
static int last_used_cpu = 0;
taskq_thread_t *tqt;
tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
INIT_LIST_HEAD(&tqt->tqt_thread_list);
INIT_LIST_HEAD(&tqt->tqt_active_list);
tqt->tqt_tq = tq;
tqt->tqt_id = TASKQID_INVALID;
tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
"%s", tq->tq_name);
if (tqt->tqt_thread == NULL) {
kmem_free(tqt, sizeof (taskq_thread_t));
return (NULL);
}
if (spl_taskq_thread_bind) {
last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
kthread_bind(tqt->tqt_thread, last_used_cpu);
}
if (spl_taskq_thread_priority)
set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
wake_up_process(tqt->tqt_thread);
return (tqt);
}
taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
int minalloc, int maxalloc, uint_t flags)
{
taskq_t *tq;
taskq_thread_t *tqt;
int count = 0, rc = 0, i;
unsigned long irqflags;
int nthreads = threads_arg;
ASSERT(name != NULL);
ASSERT(minalloc >= 0);
ASSERT(maxalloc <= INT_MAX);
ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
/* Scale the number of threads using nthreads as a percentage */
if (flags & TASKQ_THREADS_CPU_PCT) {
ASSERT(nthreads <= 100);
ASSERT(nthreads >= 0);
nthreads = MIN(threads_arg, 100);
nthreads = MAX(nthreads, 0);
nthreads = MAX((num_online_cpus() * nthreads) /100, 1);
}
tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
if (tq == NULL)
return (NULL);
tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
if (flags & TASKQ_THREADS_CPU_PCT) {
tq->tq_hp_support = B_TRUE;
if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
&tq->tq_hp_cb_node) != 0) {
kmem_free(tq, sizeof (*tq));
return (NULL);
}
}
#endif
spin_lock_init(&tq->tq_lock);
INIT_LIST_HEAD(&tq->tq_thread_list);
INIT_LIST_HEAD(&tq->tq_active_list);
tq->tq_name = kmem_strdup(name);
tq->tq_nactive = 0;
tq->tq_nthreads = 0;
tq->tq_nspawn = 0;
tq->tq_maxthreads = nthreads;
tq->tq_cpu_pct = threads_arg;
tq->tq_pri = pri;
tq->tq_minalloc = minalloc;
tq->tq_maxalloc = maxalloc;
tq->tq_nalloc = 0;
tq->tq_flags = (flags | TASKQ_ACTIVE);
tq->tq_next_id = TASKQID_INITIAL;
tq->tq_lowest_id = TASKQID_INITIAL;
INIT_LIST_HEAD(&tq->tq_free_list);
INIT_LIST_HEAD(&tq->tq_pend_list);
INIT_LIST_HEAD(&tq->tq_prio_list);
INIT_LIST_HEAD(&tq->tq_delay_list);
init_waitqueue_head(&tq->tq_work_waitq);
init_waitqueue_head(&tq->tq_wait_waitq);
tq->tq_lock_class = TQ_LOCK_GENERAL;
INIT_LIST_HEAD(&tq->tq_taskqs);
if (flags & TASKQ_PREPOPULATE) {
spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
tq->tq_lock_class);
for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
&irqflags));
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
nthreads = 1;
for (i = 0; i < nthreads; i++) {
tqt = taskq_thread_create(tq);
if (tqt == NULL)
rc = 1;
else
count++;
}
/* Wait for all threads to be started before potential destroy */
wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
/*
* taskq_thread might have decremented nspawn, but we don't want that
* here because these threads are not dynamically spawned. So reset it to 0.
*/
tq->tq_nspawn = 0;
if (rc) {
taskq_destroy(tq);
tq = NULL;
} else {
down_write(&tq_list_sem);
tq->tq_instance = taskq_find_by_name(name) + 1;
list_add_tail(&tq->tq_taskqs, &tq_list);
up_write(&tq_list_sem);
}
return (tq);
}
EXPORT_SYMBOL(taskq_create);
void
taskq_destroy(taskq_t *tq)
{
struct task_struct *thread;
taskq_thread_t *tqt;
taskq_ent_t *t;
unsigned long flags;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, flags);
#ifdef HAVE_CPU_HOTPLUG
if (tq->tq_hp_support) {
VERIFY0(cpuhp_state_remove_instance_nocalls(
spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
}
#endif
/*
* When TASKQ_ACTIVE is clear new tasks may not be added, nor may
* new worker threads be spawned for a dynamic taskq.
*/
if (dynamic_taskq != NULL)
taskq_wait_outstanding(dynamic_taskq, 0);
taskq_wait(tq);
/* remove taskq from global list used by the kstats */
down_write(&tq_list_sem);
list_del(&tq->tq_taskqs);
up_write(&tq_list_sem);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
/* wait for spawning threads to insert themselves to the list */
while (tq->tq_nspawn) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
schedule_timeout_interruptible(1);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
}
/*
* Signal each thread to exit and block until it does. Each thread
* is responsible for removing itself from the list and freeing its
* taskq_thread_t. This allows for idle threads to opt to remove
* themselves from the taskq. They can be recreated as needed.
*/
while (!list_empty(&tq->tq_thread_list)) {
tqt = list_entry(tq->tq_thread_list.next,
taskq_thread_t, tqt_thread_list);
thread = tqt->tqt_thread;
spin_unlock_irqrestore(&tq->tq_lock, flags);
kthread_stop(thread);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
}
while (!list_empty(&tq->tq_free_list)) {
t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
list_del_init(&t->tqent_list);
task_free(tq, t);
}
ASSERT0(tq->tq_nthreads);
ASSERT0(tq->tq_nalloc);
ASSERT0(tq->tq_nspawn);
ASSERT(list_empty(&tq->tq_thread_list));
ASSERT(list_empty(&tq->tq_active_list));
ASSERT(list_empty(&tq->tq_free_list));
ASSERT(list_empty(&tq->tq_pend_list));
ASSERT(list_empty(&tq->tq_prio_list));
ASSERT(list_empty(&tq->tq_delay_list));
spin_unlock_irqrestore(&tq->tq_lock, flags);
kmem_strfree(tq->tq_name);
kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
static unsigned int spl_taskq_kick = 0;
/*
* 2.6.36 API Change
* module_param_cb is introduced to take kernel_param_ops and
* module_param_call is marked as obsolete. Also set and get operations
* were changed to take a 'const struct kernel_param *'.
*/
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
int ret;
taskq_t *tq = NULL;
taskq_ent_t *t;
unsigned long flags;
ret = param_set_uint(val, kp);
if (ret < 0 || !spl_taskq_kick)
return (ret);
/* reset value */
spl_taskq_kick = 0;
down_read(&tq_list_sem);
list_for_each_entry(tq, &tq_list, tq_taskqs) {
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
/* Check if the first pending is older than 5 seconds */
t = taskq_next_ent(tq);
if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
(void) taskq_thread_spawn(tq);
printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
tq->tq_name, tq->tq_instance);
}
spin_unlock_irqrestore(&tq->tq_lock, flags);
}
up_read(&tq_list_sem);
return (ret);
}
#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
.set = param_set_taskq_kick,
.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
&spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
"Write nonzero to kick stuck taskqs to spawn more threads");
#ifdef HAVE_CPU_HOTPLUG
/*
* This callback will be called exactly once for each core that comes online,
* for each dynamic taskq. We attempt to expand taskqs that have
* TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
* time, to correctly determine whether or not to add a thread.
*/
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
unsigned long flags;
int err = 0;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
if (!(tq->tq_flags & TASKQ_ACTIVE)) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (err);
}
ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
int nthreads = MIN(tq->tq_cpu_pct, 100);
nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
tq->tq_maxthreads = nthreads;
if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
tq->tq_maxthreads > tq->tq_nthreads) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
taskq_thread_t *tqt = taskq_thread_create(tq);
if (tqt == NULL)
err = -1;
return (err);
}
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (err);
}
/*
* While we don't support offlining CPUs, it is possible that CPUs will fail
* to online successfully. We do need to be able to handle this case
* gracefully.
*/
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
unsigned long flags;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
int nthreads = MIN(tq->tq_cpu_pct, 100);
nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
tq->tq_maxthreads = nthreads;
if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
tq->tq_maxthreads < tq->tq_nthreads) {
ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
taskq_thread_t, tqt_thread_list);
struct task_struct *thread = tqt->tqt_thread;
spin_unlock_irqrestore(&tq->tq_lock, flags);
kthread_stop(thread);
return (0);
}
out:
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (0);
}
#endif
int
spl_taskq_init(void)
{
init_rwsem(&tq_list_sem);
tsd_create(&taskq_tsd, NULL);
#ifdef HAVE_CPU_HOTPLUG
spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif
system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_taskq == NULL)
- return (1);
+ return (-ENOMEM);
system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
taskq_destroy(system_taskq);
- return (1);
+ return (-ENOMEM);
}
dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
taskq_destroy(system_taskq);
taskq_destroy(system_delay_taskq);
- return (1);
+ return (-ENOMEM);
}
/*
* This is used to annotate tq_lock, so
* taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
* does not trigger a lockdep warning re: possible recursive locking
*/
dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
return (0);
}
void
spl_taskq_fini(void)
{
taskq_destroy(dynamic_taskq);
dynamic_taskq = NULL;
taskq_destroy(system_delay_taskq);
system_delay_taskq = NULL;
taskq_destroy(system_taskq);
system_taskq = NULL;
tsd_destroy(&taskq_tsd);
#ifdef HAVE_CPU_HOTPLUG
cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
spl_taskq_cpuhp_state = 0;
#endif
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c b/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
index 546db9ab8bd7..389c9d0d6df3 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
@@ -1,719 +1,719 @@
/*
* Copyright (C) 2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
*
* Solaris Porting Layer (SPL) Thread Specific Data Implementation.
*
* Thread specific data has been implemented using a hash table, which avoids
* the need to add a member to the task structure and allows maximum
* portability between kernels. This implementation has been optimized
* to keep the tsd_set() and tsd_get() times as small as possible.
*
* The majority of the entries in the hash table are for specific tsd
* entries. These entries are hashed by the product of their key and
* pid because by design the key and pid are guaranteed to be unique.
* Their product also has the desirable property that it will be uniformly
* distributed over the hash bins provided neither the pid nor the key is zero.
* Under Linux the zero pid is reserved for the idle task and thus won't be
* used, and this implementation is careful never to assign a zero key.
* By default the hash table is sized to 512 bins which is expected to
* be sufficient for light to moderate usage of thread specific data.
*
* The hash table contains two additional types of entries. The first
* type is called a 'key' entry and it is added to the hash during
* tsd_create(). It is used to store the address of the destructor function
* and it is used as an anchor point. All tsd entries which use the same
* key will be linked to this entry. This is used during tsd_destroy() to
* quickly call the destructor function for all tsd associated with the key.
* The 'key' entry may be looked up with tsd_hash_search() by passing the
* key you wish to lookup and DTOR_PID constant as the pid.
*
* The second type of entry is called a 'pid' entry and it is added to the
* hash the first time a process sets a key. The 'pid' entry is also used
* as an anchor and all tsd for the process will be linked to it. This
* list is used during tsd_exit() to ensure all registered destructors
* are run for the process. The 'pid' entry may be looked up with
* tsd_hash_search() by passing the PID_KEY constant as the key, and
* the process pid. Note that tsd_exit() is called by thread_exit()
* so if you're using the Solaris thread API you should not need to call
* tsd_exit() directly.
*
*/
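/*
* For example (illustrative only; not part of the original source), a tsd
* entry for key 7 belonging to pid 1234 lands in the bin selected by
* hash_long(7UL * 1234UL, table->ht_bits), the same computation used by
* tsd_hash_search() and tsd_hash_add() below.
*/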
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/tsd.h>
#include <linux/hash.h>
typedef struct tsd_hash_bin {
spinlock_t hb_lock;
struct hlist_head hb_head;
} tsd_hash_bin_t;
typedef struct tsd_hash_table {
spinlock_t ht_lock;
uint_t ht_bits;
uint_t ht_key;
tsd_hash_bin_t *ht_bins;
} tsd_hash_table_t;
typedef struct tsd_hash_entry {
uint_t he_key;
pid_t he_pid;
dtor_func_t he_dtor;
void *he_value;
struct hlist_node he_list;
struct list_head he_key_list;
struct list_head he_pid_list;
} tsd_hash_entry_t;
static tsd_hash_table_t *tsd_hash_table = NULL;
/*
* tsd_hash_search - searches hash table for tsd_hash_entry
* @table: hash table
* @key: search key
* @pid: search pid
*/
static tsd_hash_entry_t *
tsd_hash_search(tsd_hash_table_t *table, uint_t key, pid_t pid)
{
struct hlist_node *node = NULL;
tsd_hash_entry_t *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits);
bin = &table->ht_bins[hash];
spin_lock(&bin->hb_lock);
hlist_for_each(node, &bin->hb_head) {
entry = list_entry(node, tsd_hash_entry_t, he_list);
if ((entry->he_key == key) && (entry->he_pid == pid)) {
spin_unlock(&bin->hb_lock);
return (entry);
}
}
spin_unlock(&bin->hb_lock);
return (NULL);
}
/*
* tsd_hash_dtor - call the destructor and free all entries on the list
* @work: list of hash entries
*
* For a list of entries which have all already been removed from the
* hash call their registered destructor then free the associated memory.
*/
static void
tsd_hash_dtor(struct hlist_head *work)
{
tsd_hash_entry_t *entry;
while (!hlist_empty(work)) {
entry = hlist_entry(work->first, tsd_hash_entry_t, he_list);
hlist_del(&entry->he_list);
if (entry->he_dtor && entry->he_pid != DTOR_PID)
entry->he_dtor(entry->he_value);
kmem_free(entry, sizeof (tsd_hash_entry_t));
}
}
/*
* tsd_hash_add - adds an entry to hash table
* @table: hash table
* @key: search key
* @pid: search pid
*
* The caller is responsible for ensuring the unique key/pid do not
* already exist in the hash table. This is possible because all entries
* are thread specific thus a concurrent thread will never attempt to
* add this key/pid. Because multiple bins must be checked to add
* links to the dtor and pid entries, the entire table is locked.
*/
static int
tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value)
{
tsd_hash_entry_t *entry, *dtor_entry, *pid_entry;
tsd_hash_bin_t *bin;
ulong_t hash;
int rc = 0;
ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL);
/* New entry allocate structure, set value, and add to hash */
entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
return (ENOMEM);
entry->he_key = key;
entry->he_pid = pid;
entry->he_value = value;
INIT_HLIST_NODE(&entry->he_list);
INIT_LIST_HEAD(&entry->he_key_list);
INIT_LIST_HEAD(&entry->he_pid_list);
spin_lock(&table->ht_lock);
/* Destructor entry must exist for all valid keys */
dtor_entry = tsd_hash_search(table, entry->he_key, DTOR_PID);
ASSERT3P(dtor_entry, !=, NULL);
entry->he_dtor = dtor_entry->he_dtor;
/* Process entry must exist for all valid processes */
pid_entry = tsd_hash_search(table, PID_KEY, entry->he_pid);
ASSERT3P(pid_entry, !=, NULL);
hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits);
bin = &table->ht_bins[hash];
spin_lock(&bin->hb_lock);
/* Add to the hash, key, and pid lists */
hlist_add_head(&entry->he_list, &bin->hb_head);
list_add(&entry->he_key_list, &dtor_entry->he_key_list);
list_add(&entry->he_pid_list, &pid_entry->he_pid_list);
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
return (rc);
}
/*
* tsd_hash_add_key - adds a destructor entry to the hash table
* @table: hash table
* @keyp: search key
* @dtor: key destructor
*
* For every unique key there is a single entry in the hash which is used
* as an anchor. All other thread specific entries for this key are linked
* to this anchor via the 'he_key_list' list head. On return *keyp
* will be set to the next available key for the hash table.
*/
static int
tsd_hash_add_key(tsd_hash_table_t *table, uint_t *keyp, dtor_func_t dtor)
{
tsd_hash_entry_t *tmp_entry, *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
int keys_checked = 0;
ASSERT3P(table, !=, NULL);
/* Allocate entry to be used as a destructor for this key */
entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
return (ENOMEM);
/* Determine next available key value */
spin_lock(&table->ht_lock);
do {
/* Limited to TSD_KEYS_MAX concurrent unique keys */
if (table->ht_key++ > TSD_KEYS_MAX)
table->ht_key = 1;
/* Ensure failure when all TSD_KEYS_MAX keys are in use */
if (keys_checked++ >= TSD_KEYS_MAX) {
spin_unlock(&table->ht_lock);
return (ENOENT);
}
tmp_entry = tsd_hash_search(table, table->ht_key, DTOR_PID);
} while (tmp_entry);
/* Add destructor entry into the hash table */
entry->he_key = *keyp = table->ht_key;
entry->he_pid = DTOR_PID;
entry->he_dtor = dtor;
entry->he_value = NULL;
INIT_HLIST_NODE(&entry->he_list);
INIT_LIST_HEAD(&entry->he_key_list);
INIT_LIST_HEAD(&entry->he_pid_list);
hash = hash_long((ulong_t)*keyp * (ulong_t)DTOR_PID, table->ht_bits);
bin = &table->ht_bins[hash];
spin_lock(&bin->hb_lock);
hlist_add_head(&entry->he_list, &bin->hb_head);
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
return (0);
}
/*
* tsd_hash_add_pid - adds a process entry to the hash table
* @table: hash table
* @pid: search pid
*
* For every process there is a single entry in the hash which is used
* as an anchor. All other thread specific entries for this process are
* linked to this anchor via the 'he_pid_list' list head.
*/
static int
tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
{
tsd_hash_entry_t *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
/* Allocate entry to be used as the process reference */
entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
return (ENOMEM);
spin_lock(&table->ht_lock);
entry->he_key = PID_KEY;
entry->he_pid = pid;
entry->he_dtor = NULL;
entry->he_value = NULL;
INIT_HLIST_NODE(&entry->he_list);
INIT_LIST_HEAD(&entry->he_key_list);
INIT_LIST_HEAD(&entry->he_pid_list);
hash = hash_long((ulong_t)PID_KEY * (ulong_t)pid, table->ht_bits);
bin = &table->ht_bins[hash];
spin_lock(&bin->hb_lock);
hlist_add_head(&entry->he_list, &bin->hb_head);
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
return (0);
}
/*
* tsd_hash_del - delete an entry from hash table, key, and pid lists
* @table: hash table
* @key: search key
* @pid: search pid
*/
static void
tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry)
{
hlist_del(&entry->he_list);
list_del_init(&entry->he_key_list);
list_del_init(&entry->he_pid_list);
}
/*
* tsd_hash_table_init - allocate a hash table
* @bits: hash table size
*
* A hash table with 2^bits bins will be created. It may not be resized
* after the fact and must be freed with tsd_hash_table_fini().
*/
static tsd_hash_table_t *
tsd_hash_table_init(uint_t bits)
{
tsd_hash_table_t *table;
int hash, size = (1 << bits);
table = kmem_zalloc(sizeof (tsd_hash_table_t), KM_SLEEP);
if (table == NULL)
return (NULL);
table->ht_bins = kmem_zalloc(sizeof (tsd_hash_bin_t) * size, KM_SLEEP);
if (table->ht_bins == NULL) {
kmem_free(table, sizeof (tsd_hash_table_t));
return (NULL);
}
for (hash = 0; hash < size; hash++) {
spin_lock_init(&table->ht_bins[hash].hb_lock);
INIT_HLIST_HEAD(&table->ht_bins[hash].hb_head);
}
spin_lock_init(&table->ht_lock);
table->ht_bits = bits;
table->ht_key = 1;
return (table);
}
/*
* tsd_hash_table_fini - free a hash table
* @table: hash table
*
* Free a hash table allocated by tsd_hash_table_init(). If the hash
* table is not empty this function will call the proper destructor for
* all remaining entries before freeing the memory used by those entries.
*/
static void
tsd_hash_table_fini(tsd_hash_table_t *table)
{
HLIST_HEAD(work);
tsd_hash_bin_t *bin;
tsd_hash_entry_t *entry;
int size, i;
ASSERT3P(table, !=, NULL);
spin_lock(&table->ht_lock);
for (i = 0, size = (1 << table->ht_bits); i < size; i++) {
bin = &table->ht_bins[i];
spin_lock(&bin->hb_lock);
while (!hlist_empty(&bin->hb_head)) {
entry = hlist_entry(bin->hb_head.first,
tsd_hash_entry_t, he_list);
tsd_hash_del(table, entry);
hlist_add_head(&entry->he_list, &work);
}
spin_unlock(&bin->hb_lock);
}
spin_unlock(&table->ht_lock);
tsd_hash_dtor(&work);
kmem_free(table->ht_bins, sizeof (tsd_hash_bin_t)*(1<<table->ht_bits));
kmem_free(table, sizeof (tsd_hash_table_t));
}
/*
* tsd_remove_entry - remove a tsd entry for this thread
* @entry: entry to remove
*
* Remove the thread specific data @entry for this thread.
* If this is the last entry for this thread, also remove the PID entry.
*/
static void
tsd_remove_entry(tsd_hash_entry_t *entry)
{
HLIST_HEAD(work);
tsd_hash_table_t *table;
tsd_hash_entry_t *pid_entry;
tsd_hash_bin_t *pid_entry_bin, *entry_bin;
ulong_t hash;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
ASSERT3P(entry, !=, NULL);
spin_lock(&table->ht_lock);
hash = hash_long((ulong_t)entry->he_key *
(ulong_t)entry->he_pid, table->ht_bits);
entry_bin = &table->ht_bins[hash];
/* save the possible pid_entry */
pid_entry = list_entry(entry->he_pid_list.next, tsd_hash_entry_t,
he_pid_list);
/* remove entry */
spin_lock(&entry_bin->hb_lock);
tsd_hash_del(table, entry);
hlist_add_head(&entry->he_list, &work);
spin_unlock(&entry_bin->hb_lock);
/* if the saved entry is indeed the pid anchor entry, remove it if empty */
if (pid_entry->he_key == PID_KEY &&
list_empty(&pid_entry->he_pid_list)) {
hash = hash_long((ulong_t)pid_entry->he_key *
(ulong_t)pid_entry->he_pid, table->ht_bits);
pid_entry_bin = &table->ht_bins[hash];
spin_lock(&pid_entry_bin->hb_lock);
tsd_hash_del(table, pid_entry);
hlist_add_head(&pid_entry->he_list, &work);
spin_unlock(&pid_entry_bin->hb_lock);
}
spin_unlock(&table->ht_lock);
tsd_hash_dtor(&work);
}
/*
* tsd_set - set thread specific data
* @key: lookup key
* @value: value to set
*
* Caller must prevent racing tsd_create() or tsd_destroy(); it is protected
* from racing tsd_get() or tsd_set() because the data is thread specific.
* This function has been optimized to be fast for the update case.
* When setting the tsd initially it will be slower due to additional
* required locking and potential memory allocations.
*/
int
tsd_set(uint_t key, void *value)
{
tsd_hash_table_t *table;
tsd_hash_entry_t *entry;
pid_t pid;
int rc;
/* mark remove if value is NULL */
boolean_t remove = (value == NULL);
table = tsd_hash_table;
pid = curthread->pid;
ASSERT3P(table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
return (EINVAL);
/* Entry already exists in the hash table, update its value */
entry = tsd_hash_search(table, key, pid);
if (entry) {
entry->he_value = value;
/* remove the entry */
if (remove)
tsd_remove_entry(entry);
return (0);
}
/* don't create entry if value is NULL */
if (remove)
return (0);
/* Add a process entry to the hash if one does not yet exist */
entry = tsd_hash_search(table, PID_KEY, pid);
if (entry == NULL) {
rc = tsd_hash_add_pid(table, pid);
if (rc)
return (rc);
}
rc = tsd_hash_add(table, key, pid, value);
return (rc);
}
EXPORT_SYMBOL(tsd_set);
/*
* tsd_get - get thread specific data
* @key: lookup key
*
* Caller must prevent racing tsd_create() or tsd_destroy(). This
* implementation is designed to be fast and scalable, it does not
* lock the entire table only a single hash bin.
*/
void *
tsd_get(uint_t key)
{
tsd_hash_entry_t *entry;
ASSERT3P(tsd_hash_table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
return (NULL);
entry = tsd_hash_search(tsd_hash_table, key, curthread->pid);
if (entry == NULL)
return (NULL);
return (entry->he_value);
}
EXPORT_SYMBOL(tsd_get);
/*
* tsd_get_by_thread - get thread specific data for specified thread
* @key: lookup key
* @thread: thread to lookup
*
* Caller must prevent racing tsd_create() or tsd_destroy(). This
* implementation is designed to be fast and scalable, it does not
* lock the entire table only a single hash bin.
*/
void *
tsd_get_by_thread(uint_t key, kthread_t *thread)
{
tsd_hash_entry_t *entry;
ASSERT3P(tsd_hash_table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
return (NULL);
entry = tsd_hash_search(tsd_hash_table, key, thread->pid);
if (entry == NULL)
return (NULL);
return (entry->he_value);
}
EXPORT_SYMBOL(tsd_get_by_thread);
/*
* tsd_create - create thread specific data key
* @keyp: lookup key address
* @dtor: destructor called during tsd_destroy() or tsd_exit()
*
* Provided key must be set to 0 or it is assumed to be already in use.
* The dtor is allowed to be NULL in which case no additional cleanup
* for the data is performed during tsd_destroy() or tsd_exit().
*
* Caller must prevent racing tsd_set() or tsd_get(), this function is
* safe from racing tsd_create(), tsd_destroy(), and tsd_exit().
*/
void
tsd_create(uint_t *keyp, dtor_func_t dtor)
{
ASSERT3P(keyp, !=, NULL);
if (*keyp)
return;
(void) tsd_hash_add_key(tsd_hash_table, keyp, dtor);
}
EXPORT_SYMBOL(tsd_create);
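/*
* Usage sketch (illustrative only; not part of the original source). A
* hypothetical key my_tsd_key is created once, then each thread stores and
* retrieves its own value:
*
*	static uint_t my_tsd_key = 0;
*
*	tsd_create(&my_tsd_key, my_dtor);	/- my_dtor may be NULL
*	tsd_set(my_tsd_key, value);		/- per-thread store
*	value = tsd_get(my_tsd_key);		/- per-thread fetch
*	tsd_destroy(&my_tsd_key);		/- runs my_dtor for all threads
*/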
/*
* tsd_destroy - destroy thread specific data
* @keyp: lookup key address
*
* Destroys the thread specific data on all threads which use this key.
*
* Caller must prevent racing tsd_set() or tsd_get(), this function is
* safe from racing tsd_create(), tsd_destroy(), and tsd_exit().
*/
void
tsd_destroy(uint_t *keyp)
{
HLIST_HEAD(work);
tsd_hash_table_t *table;
tsd_hash_entry_t *dtor_entry, *entry;
tsd_hash_bin_t *dtor_entry_bin, *entry_bin;
ulong_t hash;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
spin_lock(&table->ht_lock);
dtor_entry = tsd_hash_search(table, *keyp, DTOR_PID);
if (dtor_entry == NULL) {
spin_unlock(&table->ht_lock);
return;
}
/*
* All threads which use this key must be linked off of the
* DTOR_PID entry. They are removed from the hash table and
* linked in to a private working list to be destroyed.
*/
while (!list_empty(&dtor_entry->he_key_list)) {
entry = list_entry(dtor_entry->he_key_list.next,
tsd_hash_entry_t, he_key_list);
ASSERT3U(dtor_entry->he_key, ==, entry->he_key);
ASSERT3P(dtor_entry->he_dtor, ==, entry->he_dtor);
hash = hash_long((ulong_t)entry->he_key *
(ulong_t)entry->he_pid, table->ht_bits);
entry_bin = &table->ht_bins[hash];
spin_lock(&entry_bin->hb_lock);
tsd_hash_del(table, entry);
hlist_add_head(&entry->he_list, &work);
spin_unlock(&entry_bin->hb_lock);
}
hash = hash_long((ulong_t)dtor_entry->he_key *
(ulong_t)dtor_entry->he_pid, table->ht_bits);
dtor_entry_bin = &table->ht_bins[hash];
spin_lock(&dtor_entry_bin->hb_lock);
tsd_hash_del(table, dtor_entry);
hlist_add_head(&dtor_entry->he_list, &work);
spin_unlock(&dtor_entry_bin->hb_lock);
spin_unlock(&table->ht_lock);
tsd_hash_dtor(&work);
*keyp = 0;
}
EXPORT_SYMBOL(tsd_destroy);
/*
* tsd_exit - destroys all thread specific data for this thread
*
* Destroys all the thread specific data for this thread.
*
* Caller must prevent racing tsd_set() or tsd_get(), this function is
* safe from racing tsd_create(), tsd_destroy(), and tsd_exit().
*/
void
tsd_exit(void)
{
HLIST_HEAD(work);
tsd_hash_table_t *table;
tsd_hash_entry_t *pid_entry, *entry;
tsd_hash_bin_t *pid_entry_bin, *entry_bin;
ulong_t hash;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
spin_lock(&table->ht_lock);
pid_entry = tsd_hash_search(table, PID_KEY, curthread->pid);
if (pid_entry == NULL) {
spin_unlock(&table->ht_lock);
return;
}
/*
* All keys associated with this pid must be linked off of the
* PID_KEY entry. They are removed from the hash table and
* linked in to a private working list to be destroyed.
*/
while (!list_empty(&pid_entry->he_pid_list)) {
entry = list_entry(pid_entry->he_pid_list.next,
tsd_hash_entry_t, he_pid_list);
ASSERT3U(pid_entry->he_pid, ==, entry->he_pid);
hash = hash_long((ulong_t)entry->he_key *
(ulong_t)entry->he_pid, table->ht_bits);
entry_bin = &table->ht_bins[hash];
spin_lock(&entry_bin->hb_lock);
tsd_hash_del(table, entry);
hlist_add_head(&entry->he_list, &work);
spin_unlock(&entry_bin->hb_lock);
}
hash = hash_long((ulong_t)pid_entry->he_key *
(ulong_t)pid_entry->he_pid, table->ht_bits);
pid_entry_bin = &table->ht_bins[hash];
spin_lock(&pid_entry_bin->hb_lock);
tsd_hash_del(table, pid_entry);
hlist_add_head(&pid_entry->he_list, &work);
spin_unlock(&pid_entry_bin->hb_lock);
spin_unlock(&table->ht_lock);
tsd_hash_dtor(&work);
}
EXPORT_SYMBOL(tsd_exit);
int
spl_tsd_init(void)
{
tsd_hash_table = tsd_hash_table_init(TSD_HASH_TABLE_BITS_DEFAULT);
if (tsd_hash_table == NULL)
- return (1);
+ return (-ENOMEM);
return (0);
}
void
spl_tsd_fini(void)
{
tsd_hash_table_fini(tsd_hash_table);
tsd_hash_table = NULL;
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-zlib.c b/sys/contrib/openzfs/module/os/linux/spl/spl-zlib.c
index 589496da0c78..8c6282ee5d16 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-zlib.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-zlib.c
@@ -1,217 +1,217 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
*
* z_compress_level/z_uncompress are nearly identical copies of the
* compress2/uncompress functions provided by the official zlib package
* available at http://zlib.net/. The only changes made were to slightly
* adapt the functions called to match the Linux kernel implementation
* of zlib. The full zlib license follows:
*
* zlib.h -- interface of the 'zlib' general purpose compression library
* version 1.2.5, April 19th, 2010
*
* Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*
* Jean-loup Gailly
* Mark Adler
*/
#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/zmod.h>
static spl_kmem_cache_t *zlib_workspace_cache;
/*
* A kmem_cache is used for the zlib workspaces to avoid having to vmalloc
* and vfree for every call. Using a kmem_cache also has the advantage
* that it improves the odds that the memory used will be local to this cpu.
* To further improve things it might be wise to create a dedicated per-cpu
* workspace for use. This would take some additional care because we then
* must disable preemption around the critical section, and verify that
* zlib_deflate* and zlib_inflate* never internally call schedule().
*/
static void *
zlib_workspace_alloc(int flags)
{
return (kmem_cache_alloc(zlib_workspace_cache, flags & ~(__GFP_FS)));
}
static void
zlib_workspace_free(void *workspace)
{
kmem_cache_free(zlib_workspace_cache, workspace);
}
/*
* Compresses the source buffer into the destination buffer. The level
* parameter has the same meaning as in deflateInit. sourceLen is the byte
* length of the source buffer. Upon entry, destLen is the total size of the
* destination buffer, which must be at least 0.1% larger than sourceLen plus
* 12 bytes. Upon exit, destLen is the actual size of the compressed buffer.
*
* compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
* memory, Z_BUF_ERROR if there was not enough room in the output buffer,
* Z_STREAM_ERROR if the level parameter is invalid.
*/
int
z_compress_level(void *dest, size_t *destLen, const void *source,
size_t sourceLen, int level)
{
z_stream stream;
int err;
stream.next_in = (Byte *)source;
stream.avail_in = (uInt)sourceLen;
stream.next_out = dest;
stream.avail_out = (uInt)*destLen;
if ((size_t)stream.avail_out != *destLen)
return (Z_BUF_ERROR);
stream.workspace = zlib_workspace_alloc(KM_SLEEP);
if (!stream.workspace)
return (Z_MEM_ERROR);
err = zlib_deflateInit(&stream, level);
if (err != Z_OK) {
zlib_workspace_free(stream.workspace);
return (err);
}
err = zlib_deflate(&stream, Z_FINISH);
if (err != Z_STREAM_END) {
zlib_deflateEnd(&stream);
zlib_workspace_free(stream.workspace);
return (err == Z_OK ? Z_BUF_ERROR : err);
}
*destLen = stream.total_out;
err = zlib_deflateEnd(&stream);
zlib_workspace_free(stream.workspace);
return (err);
}
EXPORT_SYMBOL(z_compress_level);
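/*
* Usage sketch (illustrative only; not part of the original source),
* following the sizing rule documented above (destination at least 0.1%
* larger than the source plus 12 bytes). sbuf/slen are hypothetical.
*
*	size_t dlen = slen + (slen / 1000) + 12;
*	void *dbuf = vmem_alloc(dlen, KM_SLEEP);
*	int err = z_compress_level(dbuf, &dlen, sbuf, slen, Z_BEST_SPEED);
*	if (err != Z_OK)
*		...;	/- Z_MEM_ERROR, Z_BUF_ERROR or Z_STREAM_ERROR
*/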
/*
* Decompresses the source buffer into the destination buffer. sourceLen is
* the byte length of the source buffer. Upon entry, destLen is the total
* size of the destination buffer, which must be large enough to hold the
* entire uncompressed data. (The size of the uncompressed data must have
* been saved previously by the compressor and transmitted to the decompressor
* by some mechanism outside the scope of this compression library.)
* Upon exit, destLen is the actual size of the uncompressed data.
* This function can be used to decompress a whole file at once if the
* input file is mmap'ed.
*
* uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
* enough memory, Z_BUF_ERROR if there was not enough room in the output
* buffer, or Z_DATA_ERROR if the input data was corrupted.
*/
int
z_uncompress(void *dest, size_t *destLen, const void *source, size_t sourceLen)
{
z_stream stream;
int err;
stream.next_in = (Byte *)source;
stream.avail_in = (uInt)sourceLen;
stream.next_out = dest;
stream.avail_out = (uInt)*destLen;
if ((size_t)stream.avail_out != *destLen)
return (Z_BUF_ERROR);
stream.workspace = zlib_workspace_alloc(KM_SLEEP);
if (!stream.workspace)
return (Z_MEM_ERROR);
err = zlib_inflateInit(&stream);
if (err != Z_OK) {
zlib_workspace_free(stream.workspace);
return (err);
}
err = zlib_inflate(&stream, Z_FINISH);
if (err != Z_STREAM_END) {
zlib_inflateEnd(&stream);
zlib_workspace_free(stream.workspace);
if (err == Z_NEED_DICT ||
(err == Z_BUF_ERROR && stream.avail_in == 0))
return (Z_DATA_ERROR);
return (err);
}
*destLen = stream.total_out;
err = zlib_inflateEnd(&stream);
zlib_workspace_free(stream.workspace);
return (err);
}
EXPORT_SYMBOL(z_uncompress);
int
spl_zlib_init(void)
{
int size;
size = MAX(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
zlib_workspace_cache = kmem_cache_create(
"spl_zlib_workspace_cache",
size, 0, NULL, NULL, NULL, NULL, NULL,
KMC_KVMEM);
if (!zlib_workspace_cache)
- return (1);
+ return (-ENOMEM);
return (0);
}
void
spl_zlib_fini(void)
{
kmem_cache_destroy(zlib_workspace_cache);
zlib_workspace_cache = NULL;
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c b/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c
index b8a8b7cd8cd8..234ae7f6cd0c 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c
@@ -1,424 +1,424 @@
/*
* Copyright (c) 2021 Klara Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <linux/file.h>
#include <linux/magic.h>
#include <sys/zone.h>
#if defined(CONFIG_USER_NS)
#include <linux/statfs.h>
#include <linux/proc_ns.h>
#endif
static kmutex_t zone_datasets_lock;
static struct list_head zone_datasets;
typedef struct zone_datasets {
struct list_head zds_list; /* zone_datasets linkage */
struct user_namespace *zds_userns; /* namespace reference */
struct list_head zds_datasets; /* datasets for the namespace */
} zone_datasets_t;
typedef struct zone_dataset {
struct list_head zd_list; /* zone_dataset linkage */
size_t zd_dsnamelen; /* length of name */
char zd_dsname[0]; /* name of the member dataset */
} zone_dataset_t;
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
/*
* Returns:
* - 0 on success
* - EBADF if it cannot open the provided file descriptor
* - ENOTTY if the file itself is not a user namespace file. We want to
* intercept this error in the ZFS layer. We cannot just return one of the
* ZFS_ERR_* errors here as we want to preserve the separation of the ZFS
* and the SPL layers.
*/
static int
user_ns_get(int fd, struct user_namespace **userns)
{
struct kstatfs st;
struct file *nsfile;
struct ns_common *ns;
int error;
if ((nsfile = fget(fd)) == NULL)
return (EBADF);
if (vfs_statfs(&nsfile->f_path, &st) != 0) {
error = ENOTTY;
goto done;
}
if (st.f_type != NSFS_MAGIC) {
error = ENOTTY;
goto done;
}
ns = get_proc_ns(file_inode(nsfile));
if (ns->ops->type != CLONE_NEWUSER) {
error = ENOTTY;
goto done;
}
*userns = container_of(ns, struct user_namespace, ns);
error = 0;
done:
fput(nsfile);
return (error);
}
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
static unsigned int
user_ns_zoneid(struct user_namespace *user_ns)
{
unsigned int r;
#if defined(HAVE_USER_NS_COMMON_INUM)
r = user_ns->ns.inum;
#else
r = user_ns->proc_inum;
#endif
return (r);
}
static struct zone_datasets *
zone_datasets_lookup(unsigned int nsinum)
{
zone_datasets_t *zds;
list_for_each_entry(zds, &zone_datasets, zds_list) {
if (user_ns_zoneid(zds->zds_userns) == nsinum)
return (zds);
}
return (NULL);
}
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
static struct zone_dataset *
zone_dataset_lookup(zone_datasets_t *zds, const char *dataset, size_t dsnamelen)
{
zone_dataset_t *zd;
list_for_each_entry(zd, &zds->zds_datasets, zd_list) {
if (zd->zd_dsnamelen != dsnamelen)
continue;
if (strncmp(zd->zd_dsname, dataset, dsnamelen) == 0)
return (zd);
}
return (NULL);
}
static int
zone_dataset_cred_check(cred_t *cred)
{
if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
return (EPERM);
return (0);
}
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
static int
zone_dataset_name_check(const char *dataset, size_t *dsnamelen)
{
if (dataset[0] == '\0' || dataset[0] == '/')
return (ENOENT);
*dsnamelen = strlen(dataset);
/* Ignore trailing slash, if supplied. */
if (dataset[*dsnamelen - 1] == '/')
(*dsnamelen)--;
return (0);
}
int
zone_dataset_attach(cred_t *cred, const char *dataset, int userns_fd)
{
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
struct user_namespace *userns;
zone_datasets_t *zds;
zone_dataset_t *zd;
int error;
size_t dsnamelen;
if ((error = zone_dataset_cred_check(cred)) != 0)
return (error);
if ((error = zone_dataset_name_check(dataset, &dsnamelen)) != 0)
return (error);
if ((error = user_ns_get(userns_fd, &userns)) != 0)
return (error);
mutex_enter(&zone_datasets_lock);
zds = zone_datasets_lookup(user_ns_zoneid(userns));
if (zds == NULL) {
zds = kmem_alloc(sizeof (zone_datasets_t), KM_SLEEP);
INIT_LIST_HEAD(&zds->zds_list);
INIT_LIST_HEAD(&zds->zds_datasets);
zds->zds_userns = userns;
/*
* Lock the namespace by increasing its refcount to prevent
* the namespace ID from being reused.
*/
get_user_ns(userns);
list_add_tail(&zds->zds_list, &zone_datasets);
} else {
zd = zone_dataset_lookup(zds, dataset, dsnamelen);
if (zd != NULL) {
mutex_exit(&zone_datasets_lock);
return (EEXIST);
}
}
zd = kmem_alloc(sizeof (zone_dataset_t) + dsnamelen + 1, KM_SLEEP);
zd->zd_dsnamelen = dsnamelen;
strncpy(zd->zd_dsname, dataset, dsnamelen);
zd->zd_dsname[dsnamelen] = '\0';
INIT_LIST_HEAD(&zd->zd_list);
list_add_tail(&zd->zd_list, &zds->zds_datasets);
mutex_exit(&zone_datasets_lock);
return (0);
#else
return (ENXIO);
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
}
EXPORT_SYMBOL(zone_dataset_attach);
int
zone_dataset_detach(cred_t *cred, const char *dataset, int userns_fd)
{
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
struct user_namespace *userns;
zone_datasets_t *zds;
zone_dataset_t *zd;
int error;
size_t dsnamelen;
if ((error = zone_dataset_cred_check(cred)) != 0)
return (error);
if ((error = zone_dataset_name_check(dataset, &dsnamelen)) != 0)
return (error);
if ((error = user_ns_get(userns_fd, &userns)) != 0)
return (error);
mutex_enter(&zone_datasets_lock);
zds = zone_datasets_lookup(user_ns_zoneid(userns));
if (zds != NULL)
zd = zone_dataset_lookup(zds, dataset, dsnamelen);
if (zds == NULL || zd == NULL) {
mutex_exit(&zone_datasets_lock);
return (ENOENT);
}
list_del(&zd->zd_list);
kmem_free(zd, sizeof (*zd) + zd->zd_dsnamelen + 1);
/* Prune the namespace entry if it has no more delegations. */
if (list_empty(&zds->zds_datasets)) {
/*
* Decrease the refcount now that the namespace is no longer
* used. It is no longer necessary to prevent the namespace ID
* from being reused.
*/
put_user_ns(userns);
list_del(&zds->zds_list);
kmem_free(zds, sizeof (*zds));
}
mutex_exit(&zone_datasets_lock);
return (0);
#else
return (ENXIO);
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
}
EXPORT_SYMBOL(zone_dataset_detach);
/*
* A dataset is visible if:
* - It is a parent of a namespace entry.
* - It is one of the namespace entries.
* - It is a child of a namespace entry.
*
* A dataset is writable if:
* - It is one of the namespace entries.
* - It is a child of a namespace entry.
*
* The parent datasets of namespace entries are visible and
* read-only to provide a path back to the root of the pool.
*/
int
zone_dataset_visible(const char *dataset, int *write)
{
zone_datasets_t *zds;
zone_dataset_t *zd;
size_t dsnamelen, zd_len;
int visible;
/* Default to read-only, in case visible is returned. */
if (write != NULL)
*write = 0;
if (zone_dataset_name_check(dataset, &dsnamelen) != 0)
return (0);
if (INGLOBALZONE(curproc)) {
if (write != NULL)
*write = 1;
return (1);
}
mutex_enter(&zone_datasets_lock);
zds = zone_datasets_lookup(crgetzoneid(curproc->cred));
if (zds == NULL) {
mutex_exit(&zone_datasets_lock);
return (0);
}
visible = 0;
list_for_each_entry(zd, &zds->zds_datasets, zd_list) {
zd_len = strlen(zd->zd_dsname);
if (zd_len > dsnamelen) {
/*
* The name of the namespace entry is longer than that
* of the dataset, so it could be that the dataset is a
* parent of the namespace entry.
*/
visible = memcmp(zd->zd_dsname, dataset,
dsnamelen) == 0 &&
zd->zd_dsname[dsnamelen] == '/';
if (visible)
break;
} else if (zd_len == dsnamelen) {
/*
* The name of the namespace entry is as long as that
* of the dataset, so perhaps the dataset itself is the
* namespace entry.
*/
visible = memcmp(zd->zd_dsname, dataset, zd_len) == 0;
if (visible) {
if (write != NULL)
*write = 1;
break;
}
} else {
/*
* The name of the namespace entry is shorter than that
* of the dataset, so perhaps the dataset is a child of
* the namespace entry.
*/
visible = memcmp(zd->zd_dsname, dataset,
zd_len) == 0 && dataset[zd_len] == '/';
if (visible) {
if (write != NULL)
*write = 1;
break;
}
}
}
mutex_exit(&zone_datasets_lock);
return (visible);
}
EXPORT_SYMBOL(zone_dataset_visible);
unsigned int
global_zoneid(void)
{
unsigned int z = 0;
#if defined(CONFIG_USER_NS)
z = user_ns_zoneid(&init_user_ns);
#endif
return (z);
}
EXPORT_SYMBOL(global_zoneid);
unsigned int
crgetzoneid(const cred_t *cr)
{
unsigned int r = 0;
#if defined(CONFIG_USER_NS)
r = user_ns_zoneid(cr->user_ns);
#endif
return (r);
}
EXPORT_SYMBOL(crgetzoneid);
boolean_t
inglobalzone(proc_t *proc)
{
#if defined(CONFIG_USER_NS)
return (proc->cred->user_ns == &init_user_ns);
#else
return (B_TRUE);
#endif
}
EXPORT_SYMBOL(inglobalzone);
int
spl_zone_init(void)
{
mutex_init(&zone_datasets_lock, NULL, MUTEX_DEFAULT, NULL);
INIT_LIST_HEAD(&zone_datasets);
return (0);
}
void
spl_zone_fini(void)
{
zone_datasets_t *zds;
zone_dataset_t *zd;
/*
* It would be better to assert that zone_datasets is empty, but there
* is no automatic mechanism for cleaning up entries when a user
* namespace is destroyed, so just do it here, since the SPL is about
* to be unloaded.
*/
while (!list_empty(&zone_datasets)) {
zds = list_entry(zone_datasets.next, zone_datasets_t, zds_list);
while (!list_empty(&zds->zds_datasets)) {
zd = list_entry(zds->zds_datasets.next,
zone_dataset_t, zd_list);
list_del(&zd->zd_list);
kmem_free(zd, sizeof (*zd) + zd->zd_dsnamelen + 1);
- put_user_ns(zds->zds_userns);
}
+ put_user_ns(zds->zds_userns);
list_del(&zds->zds_list);
kmem_free(zds, sizeof (*zds));
}
mutex_destroy(&zone_datasets_lock);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
index a139ee12c4d8..4fd071d3cb20 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
@@ -1,3018 +1,3019 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/sid.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/trace_acl.h>
#include <sys/zpl.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
#define IDMAP_WK_CREATOR_OWNER_UID 2147483648U
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
static size_t
zfs_ace_v0_size(void *acep)
{
(void) acep;
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
static int
zfs_ace_v0_data(void *acep, void **datap)
{
(void) acep;
*datap = NULL;
return (0);
}
static const acl_ops_t zfs_acl_v0_ops = {
.ace_mask_get = zfs_ace_v0_get_mask,
.ace_mask_set = zfs_ace_v0_set_mask,
.ace_flags_get = zfs_ace_v0_get_flags,
.ace_flags_set = zfs_ace_v0_set_flags,
.ace_type_get = zfs_ace_v0_get_type,
.ace_type_set = zfs_ace_v0_set_type,
.ace_who_get = zfs_ace_v0_get_who,
.ace_who_set = zfs_ace_v0_set_who,
.ace_size = zfs_ace_v0_size,
.ace_abstract_size = zfs_ace_v0_abstract_size,
.ace_mask_off = zfs_ace_v0_mask_off,
.ace_data = zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
zfs_fallthrough;
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static const acl_ops_t zfs_acl_fuid_ops = {
.ace_mask_get = zfs_ace_fuid_get_mask,
.ace_mask_set = zfs_ace_fuid_set_mask,
.ace_flags_get = zfs_ace_fuid_get_flags,
.ace_flags_set = zfs_ace_fuid_set_flags,
.ace_type_get = zfs_ace_fuid_get_type,
.ace_type_set = zfs_ace_fuid_set_type,
.ace_who_get = zfs_ace_fuid_get_who,
.ace_who_set = zfs_ace_fuid_set_who,
.ace_size = zfs_ace_fuid_size,
.ace_abstract_size = zfs_ace_fuid_abstract_size,
.ace_mask_off = zfs_ace_fuid_mask_off,
.ace_data = zfs_ace_fuid_data
};
/*
* The following three functions are provided for compatibility with
* older ZPL versions in order to determine if the file used to have
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails, then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
* After upgrade, the SA_ZPL_ZNODE_ACL should have been
* removed.
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails, then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(ZTOZSB(zp)->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(umode_t obj_mode, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (S_ISDIR(obj_mode) &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT(aclp);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
static uint64_t
zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
(void) aclcnt;
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uint64_t)(uintptr_t)acep);
}
/*
* Copy ACEs to the internal ZFS format.
* While processing the ACL, each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
memcpy(zobjacep->z_inherit_type,
aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
memcpy(objacep->a_obj_type,
zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
memcpy(objacep->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
static int
zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* convert old ACL format to new
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACEs in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
* every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY(zfs_copy_ace_2_fuid(ZTOZSB(zp), ZTOI(zp)->i_mode,
aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr) == 0);
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
static int
zfs_v4_to_unix(uint32_t access_mask, int *unmapped)
{
int new_mask = 0;
*unmapped = access_mask &
(ACE_WRITE_OWNER | ACE_WRITE_ACL | ACE_DELETE);
if (access_mask & WRITE_MASK)
new_mask |= S_IWOTH;
if (access_mask & ACE_READ_DATA)
new_mask |= S_IROTH;
if (access_mask & ACE_EXECUTE)
new_mask |= S_IXOTH;
return (new_mask);
}
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
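/*
* ACEs are evaluated in order, and only the first ACE that mentions a
* given mode bit for a given class decides it (tracked via "seen").
* For example, an owner@ DENY of ACE_READ_DATA followed by an owner@
* ALLOW of ACE_READ_DATA leaves S_IRUSR clear, because the deny was
* encountered first.
*/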
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
* We only care if this IDENTIFIER_GROUP or
* USER ACE denies execute access to someone;
* the mode itself is not affected.
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize = 0;
int acl_count = 0;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
boolean_t drop_lock = B_FALSE;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
/*
* Close a race where the znode could be upgraded while we are
* trying to read its attributes.
*
* This can only happen if the file isn't already an SA
* znode.
*/
if (!zp->z_is_sa && !have_lock) {
mutex_enter(&zp->z_lock);
drop_lock = B_TRUE;
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(ZTOZSB(zp)->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(ZTOZSB(zp)),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
if (drop_lock)
mutex_exit(&zp->z_lock);
return (error);
}
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
(void) buflen;
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIX)
return (0);
ASSERT(MUTEX_HELD(&zp->z_lock));
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error == 0 && aclp->z_acl_count > 0)
zp->z_mode = ZTOI(zp)->i_mode =
zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, KUID_TO_SUID(ZTOI(zp)->i_uid),
KGID_TO_SGID(ZTOI(zp)->i_gid));
/*
* Some ZFS implementations (ZEVO) create neither a ZNODE_ACL
* nor a DACL_ACES SA in which case ENOENT is returned from
* zfs_acl_node_read() when the SA can't be located.
* Allow chown/chgrp to succeed in these cases rather than
* returning an error that makes no sense in the context of
* the caller.
*/
if (error == ENOENT)
return (0);
return (error);
}
typedef struct trivial_acl {
uint32_t allow0; /* allow mask for bits only in owner */
uint32_t deny1; /* deny mask for bits not in owner */
uint32_t deny2; /* deny mask for bits not in group */
uint32_t owner; /* allow mask matching mode */
uint32_t group; /* allow mask matching mode */
uint32_t everyone; /* allow mask matching mode */
} trivial_acl_t;
static void
acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
{
uint32_t read_mask = ACE_READ_DATA;
uint32_t write_mask = ACE_WRITE_DATA|ACE_APPEND_DATA;
uint32_t execute_mask = ACE_EXECUTE;
if (isdir)
write_mask |= ACE_DELETE_CHILD;
masks->deny1 = 0;
if (!(mode & S_IRUSR) && (mode & (S_IRGRP|S_IROTH)))
masks->deny1 |= read_mask;
if (!(mode & S_IWUSR) && (mode & (S_IWGRP|S_IWOTH)))
masks->deny1 |= write_mask;
if (!(mode & S_IXUSR) && (mode & (S_IXGRP|S_IXOTH)))
masks->deny1 |= execute_mask;
masks->deny2 = 0;
if (!(mode & S_IRGRP) && (mode & S_IROTH))
masks->deny2 |= read_mask;
if (!(mode & S_IWGRP) && (mode & S_IWOTH))
masks->deny2 |= write_mask;
if (!(mode & S_IXGRP) && (mode & S_IXOTH))
masks->deny2 |= execute_mask;
masks->allow0 = 0;
if ((mode & S_IRUSR) && (!(mode & S_IRGRP) && (mode & S_IROTH)))
masks->allow0 |= read_mask;
if ((mode & S_IWUSR) && (!(mode & S_IWGRP) && (mode & S_IWOTH)))
masks->allow0 |= write_mask;
if ((mode & S_IXUSR) && (!(mode & S_IXGRP) && (mode & S_IXOTH)))
masks->allow0 |= execute_mask;
masks->owner = ACE_WRITE_ATTRIBUTES|ACE_WRITE_OWNER|ACE_WRITE_ACL|
ACE_WRITE_NAMED_ATTRS|ACE_READ_ACL|ACE_READ_ATTRIBUTES|
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE;
if (mode & S_IRUSR)
masks->owner |= read_mask;
if (mode & S_IWUSR)
masks->owner |= write_mask;
if (mode & S_IXUSR)
masks->owner |= execute_mask;
masks->group = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IRGRP)
masks->group |= read_mask;
if (mode & S_IWGRP)
masks->group |= write_mask;
if (mode & S_IXGRP)
masks->group |= execute_mask;
masks->everyone = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IROTH)
masks->everyone |= read_mask;
if (mode & S_IWOTH)
masks->everyone |= write_mask;
if (mode & S_IXOTH)
masks->everyone |= execute_mask;
}
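/*
* Worked example: for a regular file with mode 0644 (rw-r--r--) the
* masks come out as
*   allow0 = deny1 = deny2 = 0
*   owner    = base bits | ACE_READ_DATA | ACE_WRITE_DATA | ACE_APPEND_DATA
*   group    = base bits | ACE_READ_DATA
*   everyone = base bits | ACE_READ_DATA
* so zfs_acl_chmod() below contributes exactly the three abstract
* owner@/group@/everyone@ ALLOW entries of a trivial ACL.
*/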
/*
* ace_trivial:
* determine whether an ace_t acl is trivial
*
* Triviality implies that the ACL is composed only of owner@, group@,
* and everyone@ entries, that read_acl is never denied, and that
* write_owner/write_acl/write_attributes are granted only through the
* owner@ entry.
*/
static int
ace_trivial_common(void *acep, int aclcnt,
uint64_t (*walk)(void *, uint64_t, int aclcnt,
uint16_t *, uint16_t *, uint32_t *))
{
uint16_t flags;
uint32_t mask;
uint16_t type;
uint64_t cookie = 0;
while ((cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask))) {
switch (flags & ACE_TYPE_FLAGS) {
case ACE_OWNER:
case ACE_GROUP|ACE_IDENTIFIER_GROUP:
case ACE_EVERYONE:
break;
default:
return (1);
}
if (flags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE|ACE_NO_PROPAGATE_INHERIT_ACE|
ACE_INHERIT_ONLY_ACE))
return (1);
/*
* Special check for some special bits
*
* Don't allow anybody to deny reading basic
* attributes or a file's ACL.
*/
if ((mask & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
(type == ACE_ACCESS_DENIED_ACE_TYPE))
return (1);
/*
* Delete permission is never set by default
*/
if (mask & ACE_DELETE)
return (1);
/*
* Child delete permission should be accompanied by write
*/
if ((mask & ACE_DELETE_CHILD) && !(mask & ACE_WRITE_DATA))
return (1);
/*
* Only allow owner@ to have
* write_acl/write_owner/write_attributes/write_xattr.
*/
if (type == ACE_ACCESS_ALLOWED_ACE_TYPE &&
(!(flags & ACE_OWNER) && (mask &
(ACE_WRITE_OWNER|ACE_WRITE_ACL| ACE_WRITE_ATTRIBUTES|
ACE_WRITE_NAMED_ATTRS))))
return (1);
}
return (0);
}
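/*
* For example, zfs_aclset_common() below calls
*	ace_trivial_common(aclp, 0, zfs_ace_walk)
* and sets ZFS_ACL_TRIVIAL in z_pflags when it returns 0; that flag in
* turn lets zfs_zaccess_trivial() hand the access check to the generic
* mode / POSIX ACL path instead of walking the ACEs.
*/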
/*
* Common code for setting ACLs.
*
* This function is shared by the object-creation, chmod, and
* zfs_setacl() paths; the caller has already validated the ACL being
* set.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
KUID_TO_SUID(ZTOI(zp)->i_uid), KGID_TO_SGID(ZTOI(zp)->i_gid));
zp->z_mode = ZTOI(zp)->i_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle the old on-disk format
* as well as the newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If the ACL was previously external and we are now
* converting to the new ACL format, release the old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If this is the old version, swap count/bytes to match the
* old layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
static void
zfs_acl_chmod(boolean_t isdir, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
trivial_acl_t masks;
new_count = new_bytes = 0;
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or @everyone
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions to be no greater than
* group permissions.
* The "aclinherit" and "aclmode" properties
* affect policy for create and chmod(2),
* respectively.
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(S_ISDIR(ZTOI(zp)->i_mode), mode, B_TRUE,
(ZTOZSB(zp)->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if (S_ISDIR(obj_mode) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!(S_ISDIR(obj_mode) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = S_ISDIR(va_mode);
boolean_t isreg = S_ISREG(va_mode);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
if (aclinherit == ZFS_ACL_DISCARD || S_ISLNK(va_mode))
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(va_mode, iflags))
continue;
/*
* If owner@, group@, or everyone@ is inheritable,
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
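/*
* Summary of how the aclinherit property values are applied above:
*   discard        nothing is inherited
*   noallow        only DENY entries are inherited
*   restricted     inherited ALLOW entries lose write_acl/write_owner
*   passthrough    entries are inherited unmodified
*   passthrough-x  like passthrough, but execute is inherited by regular
*                  files only when the create mode requests it
*/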
/*
* Create a file system object's initial permissions,
* including inheritable ACEs.
* Also, create FUIDs for owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_acl_t *paclp;
gid_t gid = vap->va_gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = vap->va_mode;
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_mode, vsecp,
cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
acl_ids->z_fuid = vap->va_uid;
acl_ids->z_fgid = vap->va_gid;
#ifdef HAVE_KSID
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid,
cr, ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != KGID_TO_SGID(ZTOI(dzp)->i_gid) &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
if (dzp->z_mode & S_ISGID) {
char *domain;
uint32_t rid;
acl_ids->z_fgid = KGID_TO_SGID(
ZTOI(dzp)->i_gid);
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid,
FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
}
}
#endif /* HAVE_KSID */
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(S_ISDIR(vap->va_mode))) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(cr, gid) != 0)
acl_ids->z_mode &= ~S_ISGID;
}
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_lock);
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (S_ISDIR(vap->va_mode))
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_mode, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(ZTOZSB(zp), aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp ==
aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_mode, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If this is the old version, the ACL won't fit in the bonus buffer,
* and we aren't upgrading, then take out the necessary DMU holds.
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT(error == 0);
ASSERT(zp->z_acl_cached == NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflict with dataset
* attributes, otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) && (zfs_is_readonly(ZTOZSB(zp))) &&
(!Z_ISDEV(ZTOI(zp)->i_mode) ||
(Z_ISDEV(ZTOI(zp)->i_mode) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (SET_ERROR(EPERM));
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
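/*
* Example trace: working_mode = ACE_READ_DATA|ACE_WRITE_DATA.  A DENY
* ACE matching the caller with ACE_WRITE_DATA moves that bit into
* deny_mask and clears it from working_mode; a later ALLOW of
* ACE_READ_DATA clears the read bit.  working_mode is then 0, the
* deny_mask is merged back into working_mode, and EACCES is returned to
* report the denied write.
*/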
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT(zp->z_acl_cached);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (S_ISDIR(ZTOI(zp)->i_mode) &&
(iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
zfs_fallthrough;
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
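/*
 * Illustrative sketch (added for exposition, not part of this change):
 * a simplified, self-contained model of the working_mode bookkeeping
 * described above. The names sketch_ace_t and sketch_eval_aces are
 * hypothetical and only show how bits covered by an ACE are cleared and
 * how denied bits are put back into the working mode at the end.
 */
typedef struct {
	uint32_t mask;		/* access bits this ACE covers */
	boolean_t deny;		/* B_TRUE for a DENY ACE */
} sketch_ace_t;

static int
sketch_eval_aces(const sketch_ace_t *aces, int n, uint32_t *working_mode)
{
	uint32_t deny_mask = 0;

	for (int i = 0; i < n && *working_mode != 0; i++) {
		uint32_t matched = aces[i].mask & *working_mode;

		if (matched == 0)
			continue;	/* ACE does not affect any AoI */
		if (aces[i].deny)
			deny_mask |= matched;
		/* A later ACE may not undo this grant or denial. */
		*working_mode &= ~matched;
	}
	if (deny_mask != 0) {
		/* Hand the denied bits back to the caller. */
		*working_mode |= deny_mask;
		return (EACCES);
	}
	return (*working_mode ? -1 : 0);
}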
/*
* Return true if any access whatsoever is granted; we don't actually
* care what access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(ZTOZSB(zp),
KUID_TO_SUID(ZTOI(zp)->i_uid), cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOI(zp), owner) == 0);
}
return (B_TRUE);
}
/*
* Simplified access check for the case where the ACL is known not to
* contain information beyond what is defined in the mode. In this case, we
* can pass along to the kernel / vfs generic_permission() check, which
* evaluates the mode and POSIX ACL.
*
* NFSv4 ACLs allow granting permissions that are usually reserved for
* the file owner or superuser. Examples are ACE_WRITE_OWNER (chown),
* ACE_WRITE_ACL (chmod), and ACE_DELETE. ACE_DELETE requests must fail
* because with conventional POSIX permissions, the right to delete a file
* is determined by the write bit on the parent directory.
*
* If unmappable perms are requested, then we must return EPERM
* and include those bits in the working_mode so that the caller of
* zfs_zaccess_common() can decide whether to perform additional
* policy / capability checks. EACCES is used in zfs_zaccess_aces_check()
* to indicate that an access check failed due to an explicit DENY entry, and so
* we want to avoid that here.
*/
static int
zfs_zaccess_trivial(znode_t *zp, uint32_t *working_mode, cred_t *cr)
{
int err, mask;
int unmapped = 0;
ASSERT(zp->z_pflags & ZFS_ACL_TRIVIAL);
mask = zfs_v4_to_unix(*working_mode, &unmapped);
if (mask == 0 || unmapped) {
*working_mode = unmapped;
return (unmapped ? SET_ERROR(EPERM) : 0);
}
#if defined(HAVE_IOPS_PERMISSION_USERNS)
err = generic_permission(cr->user_ns, ZTOI(zp), mask);
#else
err = generic_permission(ZTOI(zp), mask);
#endif
if (err != 0) {
return (SET_ERROR(EPERM));
}
*working_mode = unmapped;
return (0);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
if (zp->z_pflags & ZFS_ACL_TRIVIAL)
return (zfs_zaccess_trivial(zp, working_mode, cr));
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
}
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t owner = B_FALSE;
boolean_t groupmbr = B_FALSE;
boolean_t is_attr;
uid_t uid = crgetuid(cr);
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(S_ISDIR(ZTOI(zdp)->i_mode)));
if (is_attr)
goto slow;
mutex_enter(&zdp->z_acl_lock);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
if (KUID_TO_SUID(ZTOI(zdp)->i_uid) != 0 ||
KGID_TO_SGID(ZTOI(zdp)->i_gid) != 0) {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
if (uid == KUID_TO_SUID(ZTOI(zdp)->i_uid)) {
owner = B_TRUE;
if (zdp->z_mode & S_IXUSR) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (groupmember(KGID_TO_SGID(ZTOI(zdp)->i_gid), cr)) {
groupmbr = B_TRUE;
if (zdp->z_mode & S_IXGRP) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (!owner && !groupmbr) {
if (zdp->z_mode & S_IXOTH) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
}
mutex_exit(&zdp->z_acl_lock);
slow:
DTRACE_PROBE(zfs__fastpath__execute__access__miss);
- ZFS_ENTER(ZTOZSB(zdp));
+ if ((error = zfs_enter(ZTOZSB(zdp), FTAG)) != 0)
+ return (error);
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
- ZFS_EXIT(ZTOZSB(zdp));
+ zfs_exit(ZTOZSB(zdp), FTAG);
return (error);
}
/*
* Determine whether access should be granted/denied.
*
* The least priv subsystem is always consulted as a basic privilege
* can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && S_ISDIR(ZTOI(zp)->i_mode));
/*
* If attribute then validate against base file
*/
if (is_attr) {
if ((error = zfs_zget(ZTOZSB(zp),
zp->z_xattr_parent, &xzp)) != 0) {
return (error);
}
check_zp = xzp;
/*
* fixup mode to map to xattr perms
*/
if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) {
mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
mode |= ACE_WRITE_NAMED_ATTRS;
}
if (mode & (ACE_READ_DATA|ACE_EXECUTE)) {
mode &= ~(ACE_READ_DATA|ACE_EXECUTE);
mode |= ACE_READ_NAMED_ATTRS;
}
}
owner = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOI(zp)->i_uid),
cr, ZFS_OWNER);
/*
* Map the bits required to the standard inode flags
* S_IRUSR|S_IWUSR|S_IXUSR in the needed_bits. Map the bits
* mapped by working_mode (currently missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
needed_bits |= S_IXUSR;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
zrele(xzp);
return (secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
zrele(xzp);
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
}
if (error && check_privs) {
mode_t checkmode = 0;
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
error = 0;
ASSERT(working_mode != 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
checkmode |= S_IXUSR;
error = secpolicy_vnode_access2(cr, ZTOI(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
zrele(xzp);
return (error);
}
/*
* Translate traditional unix S_IRUSR/S_IWUSR/S_IXUSR mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr));
}
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr));
}
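/*
 * Illustrative sketch (added for exposition, not part of this change):
 * the kind of rwx -> NFSv4 ACE mapping that zfs_unix_to_v4() applies to
 * the already-shifted owner bits (mode >> 6). The helper name and exact
 * bit set here are assumptions for illustration; see zfs_acl.c for the
 * authoritative mapping.
 */
static uint32_t
sketch_unix_to_v4(uint32_t rwx)
{
	uint32_t v4_mode = 0;

	if (rwx & 0004)			/* read */
		v4_mode |= ACE_READ_DATA;
	if (rwx & 0002)			/* write */
		v4_mode |= ACE_WRITE_DATA;
	if (rwx & 0001)			/* execute */
		v4_mode |= ACE_EXECUTE;
	return (v4_mode);
}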
/* See zfs_zaccess_delete() */
static const boolean_t zfs_write_implies_delete_child = B_TRUE;
/*
* Determine whether delete access should be granted.
*
* The following chart outlines how we handle delete permissions, which is
* how recent versions of Windows (Windows 2008) handle it. The efficiency
* comes from not having to check the parent ACL where the object itself grants
* delete:
*
* -------------------------------------------------------
* | Parent Dir | Target Object Permissions |
* | permissions | |
* -------------------------------------------------------
* | | ACL Allows | ACL Denies| Delete |
* | | Delete | Delete | unspecified|
* -------------------------------------------------------
* | ACL Allows | Permit | Deny * | Permit |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL Denies | Permit | Deny | Deny |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL specifies | | | |
* | only allow | Permit | Deny * | Permit |
* | write and | | | |
* | execute | | | |
* -------------------------------------------------------
* | ACL denies | | | |
* | write and | Permit | Deny | Deny |
* | execute | | | |
* -------------------------------------------------------
* ^
* |
* Re. execute permission on the directory: if that's missing,
* the vnode lookup of the target will fail before we get here.
*
* Re [*] in the table above: NFSv4 would normally Permit delete for
* these two cells of the matrix.
* See acl.h for notes on which ACE_... flags should be checked for which
* operations. Specifically, the NFSv4 committee recommendation is in
* conflict with the Windows interpretation of DENY ACEs, where DENY ACEs
* should take precedence ahead of ALLOW ACEs.
*
* This implementation always consults the target object's ACL first.
* If a DENY ACE is present on the target object that specifies ACE_DELETE,
* delete access is denied. If an ALLOW ACE with ACE_DELETE is present on
* the target object, access is allowed. If and only if no entries with
* ACE_DELETE are present in the object's ACL, check the container's ACL
* for entries with ACE_DELETE_CHILD.
*
* A summary of the logic implemented from the table above is as follows:
*
* First check for DENY ACEs that apply.
* If either target or container has a deny, EACCES.
*
* Delete access can then be summarized as follows:
* 1: The object to be deleted grants ACE_DELETE, or
* 2: The containing directory grants ACE_DELETE_CHILD.
* In a Windows system, that would be the end of the story.
* In this system, (2) has some complications...
* 2a: "sticky" bit on a directory adds restrictions, and
* 2b: existing ACEs from previous versions of ZFS may
* not carry ACE_DELETE_CHILD where they should, so we
* also allow delete when ACE_WRITE_DATA is granted.
*
* Note: 2b is technically a work-around for a prior bug,
* which hopefully can go away some day. For those who
* no longer need the work around, and for testing, this
* work-around is made conditional via the tunable:
* zfs_write_implies_delete_child
*/
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
{
uint32_t wanted_dirperms;
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
boolean_t dzpcheck_privs;
boolean_t zpcheck_privs;
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* Case 1:
* If target object grants ACE_DELETE then we are done. This is
* indicated by a return value of 0. For this case we don't worry
* about the sticky bit because sticky only applies to the parent
* directory and this is the child access result.
*
* If we encounter a DENY ACE here, we're also done (EACCES).
* Note that if we hit a DENY ACE here (on the target) it should
* take precedence over a DENY ACE on the container, so that when
* we have more complete auditing support we will be able to
* report an access failure against the specific target.
* (This is part of why we're checking the target first.)
*/
zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr);
if (zp_error == EACCES) {
/* We hit a DENY ACE. */
if (!zpcheck_privs)
return (SET_ERROR(zp_error));
return (secpolicy_vnode_remove(cr));
}
if (zp_error == 0)
return (0);
/*
* Case 2:
* If the containing directory grants ACE_DELETE_CHILD,
* or we're in backward compatibility mode and the
* containing directory has ACE_WRITE_DATA, allow.
* Case 2b is handled with wanted_dirperms.
*/
wanted_dirperms = ACE_DELETE_CHILD;
if (zfs_write_implies_delete_child)
wanted_dirperms |= ACE_WRITE_DATA;
dzp_error = zfs_zaccess_common(dzp, wanted_dirperms,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr);
if (dzp_error == EACCES) {
/* We hit a DENY ACE. */
if (!dzpcheck_privs)
return (SET_ERROR(dzp_error));
return (secpolicy_vnode_remove(cr));
}
/*
* Cases 2a, 2b (continued)
*
* Note: dzp_working_mode now contains any permissions
* that were NOT granted. Therefore, if any of the
* wanted_dirperms WERE granted, we will have:
* dzp_working_mode != wanted_dirperms
* We're really asking if ANY of those permissions
* were granted, and if so, grant delete access.
*/
if (dzp_working_mode != wanted_dirperms)
dzp_error = 0;
/*
* dzp_error is 0 if the container granted us permissions to "modify".
* If we do not have permission via one or more ACEs, our current
* privileges may still permit us to modify the container.
*
* dzpcheck_privs is false when, e.g., the FS is read-only.
* Otherwise, do privilege checks for the container.
*/
if (dzp_error != 0 && dzpcheck_privs) {
uid_t owner;
/*
* The secpolicy call needs the requested access and
* the current access mode of the container, but it
* only knows about Unix-style modes (VEXEC, VWRITE),
* so this must condense the fine-grained ACE bits into
* Unix modes.
*
* The VEXEC flag is easy, because we know that has
* always been checked before we get here (during the
* lookup of the target vnode). The container has not
* granted us permissions to "modify", so we do not set
* the VWRITE flag in the current access mode.
*/
owner = zfs_fuid_map_id(ZTOZSB(dzp),
KUID_TO_SUID(ZTOI(dzp)->i_uid), cr, ZFS_OWNER);
dzp_error = secpolicy_vnode_access2(cr, ZTOI(dzp),
owner, S_IXUSR, S_IWUSR|S_IXUSR);
}
if (dzp_error != 0) {
/*
* Note: We may have dzp_error = -1 here (from
* zfs_zaccess_common()). Don't return that.
*/
return (SET_ERROR(EACCES));
}
/*
* At this point, we know that the directory permissions allow
* us to modify, but we still need to check for the additional
* restrictions that apply when the "sticky bit" is set.
*
* Yes, zfs_sticky_remove_access() also checks this bit, but
* checking it here and skipping the call below is nice when
* you're watching all of this with dtrace.
*/
if ((dzp->z_mode & S_ISVTX) == 0)
return (0);
/*
* zfs_sticky_remove_access will succeed if:
* 1. The sticky bit is absent.
* 2. We pass the sticky bit restrictions.
* 3. We have privileges that always allow file removal.
*/
return (zfs_sticky_remove_access(dzp, zp, cr));
}
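/*
 * Illustrative sketch (added for exposition, not part of this change):
 * the delete decision above condensed into a single hypothetical helper.
 * Privilege checks, the -1 "not addressed" case, and locking are omitted;
 * the parameter names are examples only.
 */
static int
sketch_delete_allowed(boolean_t target_denies, boolean_t target_allows,
    boolean_t dir_allows_delete_child_or_write, boolean_t sticky_ok)
{
	if (target_denies)
		return (EACCES);	/* a DENY ACE on the target wins */
	if (target_allows)
		return (0);		/* Case 1: object grants ACE_DELETE */
	if (!dir_allows_delete_child_or_write)
		return (EACCES);	/* Case 2 not satisfied */
	return (sticky_ok ? 0 : EACCES); /* Case 2a: sticky-bit checks */
}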
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are combination of delete permission +
* add file/subdir permission.
*/
/*
* First make sure we do the delete portion.
*
* If that succeeds then check for add_file/add_subdir permissions
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it?
*/
if (tzp) {
if ((error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
}
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
index 32342d25ce6f..4ae0a65370e5 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
@@ -1,1280 +1,1288 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* LLNL-CODE-403049.
* Rewritten for Linux by:
* Rohan Puri <rohan.puri15@gmail.com>
* Brian Behlendorf <behlendorf1@llnl.gov>
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright (c) 2018 George Melikov. All Rights Reserved.
* Copyright (c) 2019 Datto, Inc. All rights reserved.
* Copyright (c) 2020 The MathWorks, Inc. All rights reserved.
*/
/*
* ZFS control directory (a.k.a. ".zfs")
*
* This directory provides a common location for all ZFS meta-objects.
* Currently, this is only the 'snapshot' and 'shares' directory, but this may
* expand in the future. The elements are built dynamically, as the hierarchy
* does not actually exist on disk.
*
* For 'snapshot', we don't want to have all snapshots always mounted, because
* this would take up a huge amount of space in /etc/mnttab. We have three
* types of objects:
*
* ctldir ------> snapshotdir -------> snapshot
* |
* |
* V
* mounted fs
*
* The 'snapshot' node contains just enough information to lookup '..' and act
* as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
* perform an automount of the underlying filesystem and return the
* corresponding inode.
*
* All mounts are handled automatically by a user mode helper which invokes
* the mount procedure. Unmounts are handled by allowing the mount
* point to expire so the kernel may automatically unmount it.
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') all share the same
* zfsvfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the '.zfs/snapshot/<snapname>' paths
* (ie: snapshots) are complete ZFS filesystems and have their own unique
* zfsvfs_t. However, the fsid reported by these mounts will be the same
* as that used by the parent zfsvfs_t to make NFS happy.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/zpl.h>
#include <sys/mntent.h>
#include "zfs_namecheck.h"
/*
* Two AVL trees are maintained which contain all currently automounted
* snapshots. Every automounted snapshot maps to a single zfs_snapentry_t
* entry which MUST:
*
* - be attached to both trees, and
* - be unique, no duplicate entries are allowed.
*
* The zfs_snapshots_by_name tree is indexed by the full dataset name
* while the zfs_snapshots_by_objsetid tree is indexed by the unique
* objsetid. This allows for fast lookups either by name or objsetid.
*/
static avl_tree_t zfs_snapshots_by_name;
static avl_tree_t zfs_snapshots_by_objsetid;
static krwlock_t zfs_snapshot_lock;
/*
* Control Directory Tunables (.zfs)
*/
int zfs_expire_snapshot = ZFSCTL_EXPIRE_SNAPSHOT;
static int zfs_admin_snapshot = 0;
typedef struct {
char *se_name; /* full snapshot name */
char *se_path; /* full mount path */
spa_t *se_spa; /* pool spa */
uint64_t se_objsetid; /* snapshot objset id */
struct dentry *se_root_dentry; /* snapshot root dentry */
krwlock_t se_taskqid_lock; /* scheduled unmount taskqid lock */
taskqid_t se_taskqid; /* scheduled unmount taskqid */
avl_node_t se_node_name; /* zfs_snapshots_by_name link */
avl_node_t se_node_objsetid; /* zfs_snapshots_by_objsetid link */
zfs_refcount_t se_refcount; /* reference count */
} zfs_snapentry_t;
static void zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay);
/*
* Allocate a new zfs_snapentry_t, being careful to make a copy of the
* snapshot name and provided mount point. No reference is taken.
*/
static zfs_snapentry_t *
zfsctl_snapshot_alloc(const char *full_name, const char *full_path, spa_t *spa,
uint64_t objsetid, struct dentry *root_dentry)
{
zfs_snapentry_t *se;
se = kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP);
se->se_name = kmem_strdup(full_name);
se->se_path = kmem_strdup(full_path);
se->se_spa = spa;
se->se_objsetid = objsetid;
se->se_root_dentry = root_dentry;
se->se_taskqid = TASKQID_INVALID;
rw_init(&se->se_taskqid_lock, NULL, RW_DEFAULT, NULL);
zfs_refcount_create(&se->se_refcount);
return (se);
}
/*
* Free a zfs_snapentry_t; the caller must ensure there are no active
* references.
*/
static void
zfsctl_snapshot_free(zfs_snapentry_t *se)
{
zfs_refcount_destroy(&se->se_refcount);
kmem_strfree(se->se_name);
kmem_strfree(se->se_path);
rw_destroy(&se->se_taskqid_lock);
kmem_free(se, sizeof (zfs_snapentry_t));
}
/*
* Hold a reference on the zfs_snapentry_t.
*/
static void
zfsctl_snapshot_hold(zfs_snapentry_t *se)
{
zfs_refcount_add(&se->se_refcount, NULL);
}
/*
* Release a reference on the zfs_snapentry_t. When the number of
* references drops to zero the structure will be freed.
*/
static void
zfsctl_snapshot_rele(zfs_snapentry_t *se)
{
if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
zfsctl_snapshot_free(se);
}
/*
* Add a zfs_snapentry_t to both the zfs_snapshots_by_name and
* zfs_snapshots_by_objsetid trees. While the zfs_snapentry_t is part
* of the trees a reference is held.
*/
static void
zfsctl_snapshot_add(zfs_snapentry_t *se)
{
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
zfsctl_snapshot_hold(se);
avl_add(&zfs_snapshots_by_name, se);
avl_add(&zfs_snapshots_by_objsetid, se);
}
/*
* Remove a zfs_snapentry_t from both the zfs_snapshots_by_name and
* zfs_snapshots_by_objsetid trees. Upon removal a reference is dropped,
* this can result in the structure being freed if that was the last
* remaining reference.
*/
static void
zfsctl_snapshot_remove(zfs_snapentry_t *se)
{
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
avl_remove(&zfs_snapshots_by_name, se);
avl_remove(&zfs_snapshots_by_objsetid, se);
zfsctl_snapshot_rele(se);
}
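/*
 * Illustrative lifecycle (added for exposition, not part of this change):
 * an entry carries no reference after zfsctl_snapshot_alloc(); the trees
 * take one in zfsctl_snapshot_add() and drop it in zfsctl_snapshot_remove(),
 * which frees the entry once no lookup holds remain. The fragment below is
 * an example only (locking omitted, placeholder arguments).
 */
#if 0	/* example only */
	zfs_snapentry_t *se, *found;

	se = zfsctl_snapshot_alloc(name, path, spa, objsetid, dentry);
	zfsctl_snapshot_add(se);			/* trees hold one ref */

	found = zfsctl_snapshot_find_by_name(name);	/* +1 ref if found */
	if (found != NULL)
		zfsctl_snapshot_rele(found);		/* drop lookup ref */

	zfsctl_snapshot_remove(se);	/* drop trees' ref; may free entry */
#endif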
/*
* Snapshot name comparison function for the zfs_snapshots_by_name.
*/
static int
snapentry_compare_by_name(const void *a, const void *b)
{
const zfs_snapentry_t *se_a = a;
const zfs_snapentry_t *se_b = b;
int ret;
ret = strcmp(se_a->se_name, se_b->se_name);
if (ret < 0)
return (-1);
else if (ret > 0)
return (1);
else
return (0);
}
/*
* Snapshot comparison function for the zfs_snapshots_by_objsetid tree.
*/
static int
snapentry_compare_by_objsetid(const void *a, const void *b)
{
const zfs_snapentry_t *se_a = a;
const zfs_snapentry_t *se_b = b;
if (se_a->se_spa != se_b->se_spa)
return ((ulong_t)se_a->se_spa < (ulong_t)se_b->se_spa ? -1 : 1);
if (se_a->se_objsetid < se_b->se_objsetid)
return (-1);
else if (se_a->se_objsetid > se_b->se_objsetid)
return (1);
else
return (0);
}
/*
* Find a zfs_snapentry_t in zfs_snapshots_by_name. If the snapname
* is found, a pointer to the zfs_snapentry_t is returned and a reference
* is taken on the structure. The caller is responsible for dropping the
* reference with zfsctl_snapshot_rele(). If the snapname is not found,
* NULL will be returned.
*/
static zfs_snapentry_t *
zfsctl_snapshot_find_by_name(const char *snapname)
{
zfs_snapentry_t *se, search;
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
search.se_name = (char *)snapname;
se = avl_find(&zfs_snapshots_by_name, &search, NULL);
if (se)
zfsctl_snapshot_hold(se);
return (se);
}
/*
* Find a zfs_snapentry_t in zfs_snapshots_by_objsetid given the objset id
* rather than the snapname. In all other respects it behaves the same
* as zfsctl_snapshot_find_by_name().
*/
static zfs_snapentry_t *
zfsctl_snapshot_find_by_objsetid(spa_t *spa, uint64_t objsetid)
{
zfs_snapentry_t *se, search;
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
search.se_spa = spa;
search.se_objsetid = objsetid;
se = avl_find(&zfs_snapshots_by_objsetid, &search, NULL);
if (se)
zfsctl_snapshot_hold(se);
return (se);
}
/*
* Rename a zfs_snapentry_t in the zfs_snapshots_by_name. The structure is
* removed, renamed, and added back to the new correct location in the tree.
*/
static int
zfsctl_snapshot_rename(const char *old_snapname, const char *new_snapname)
{
zfs_snapentry_t *se;
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
se = zfsctl_snapshot_find_by_name(old_snapname);
if (se == NULL)
return (SET_ERROR(ENOENT));
zfsctl_snapshot_remove(se);
kmem_strfree(se->se_name);
se->se_name = kmem_strdup(new_snapname);
zfsctl_snapshot_add(se);
zfsctl_snapshot_rele(se);
return (0);
}
/*
* Delayed task responsible for unmounting an expired automounted snapshot.
*/
static void
snapentry_expire(void *data)
{
zfs_snapentry_t *se = (zfs_snapentry_t *)data;
spa_t *spa = se->se_spa;
uint64_t objsetid = se->se_objsetid;
if (zfs_expire_snapshot <= 0) {
zfsctl_snapshot_rele(se);
return;
}
rw_enter(&se->se_taskqid_lock, RW_WRITER);
se->se_taskqid = TASKQID_INVALID;
rw_exit(&se->se_taskqid_lock);
(void) zfsctl_snapshot_unmount(se->se_name, MNT_EXPIRE);
zfsctl_snapshot_rele(se);
/*
* Reschedule the unmount if the zfs_snapentry_t wasn't removed.
* This can occur when the snapshot is busy.
*/
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
zfsctl_snapshot_rele(se);
}
rw_exit(&zfs_snapshot_lock);
}
/*
* Cancel an automatic unmount of a snapname. This callback is responsible
* for dropping the reference on the zfs_snapentry_t which was taken
* during dispatch.
*/
static void
zfsctl_snapshot_unmount_cancel(zfs_snapentry_t *se)
{
int err = 0;
rw_enter(&se->se_taskqid_lock, RW_WRITER);
err = taskq_cancel_id(system_delay_taskq, se->se_taskqid);
/*
* If we get ENOENT, the taskq couldn't be found to be
* canceled, so we can just mark it as invalid because
* it's already gone. If we got EBUSY, then we already
* blocked until it was gone _anyway_, so we don't care.
*/
se->se_taskqid = TASKQID_INVALID;
rw_exit(&se->se_taskqid_lock);
if (err == 0) {
zfsctl_snapshot_rele(se);
}
}
/*
* Dispatch the unmount task for delayed handling with a hold protecting it.
*/
static void
zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay)
{
if (delay <= 0)
return;
zfsctl_snapshot_hold(se);
rw_enter(&se->se_taskqid_lock, RW_WRITER);
ASSERT3S(se->se_taskqid, ==, TASKQID_INVALID);
se->se_taskqid = taskq_dispatch_delay(system_delay_taskq,
snapentry_expire, se, TQ_SLEEP, ddi_get_lbolt() + delay * HZ);
rw_exit(&se->se_taskqid_lock);
}
/*
* Schedule an automatic unmount of objset id to occur in delay seconds from
* now. Any previous delayed unmount will be cancelled in favor of the
* updated deadline. A reference is taken by zfsctl_snapshot_find_by_name()
* and held until the outstanding task is handled or cancelled.
*/
int
zfsctl_snapshot_unmount_delay(spa_t *spa, uint64_t objsetid, int delay)
{
zfs_snapentry_t *se;
int error = ENOENT;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
zfsctl_snapshot_unmount_cancel(se);
zfsctl_snapshot_unmount_delay_impl(se, delay);
zfsctl_snapshot_rele(se);
error = 0;
}
rw_exit(&zfs_snapshot_lock);
return (error);
}
/*
* Check if snapname is currently mounted. Returns B_TRUE when mounted
* and B_FALSE when unmounted.
*/
static boolean_t
zfsctl_snapshot_ismounted(const char *snapname)
{
zfs_snapentry_t *se;
boolean_t ismounted = B_FALSE;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_name(snapname)) != NULL) {
zfsctl_snapshot_rele(se);
ismounted = B_TRUE;
}
rw_exit(&zfs_snapshot_lock);
return (ismounted);
}
/*
* Check if the given inode is a part of the virtual .zfs directory.
*/
boolean_t
zfsctl_is_node(struct inode *ip)
{
return (ITOZ(ip)->z_is_ctldir);
}
/*
* Check if the given inode is a .zfs/snapshot/<snapname> directory.
*/
boolean_t
zfsctl_is_snapdir(struct inode *ip)
{
return (zfsctl_is_node(ip) && (ip->i_ino <= ZFSCTL_INO_SNAPDIRS));
}
/*
* Allocate a new inode with the passed id and ops.
*/
static struct inode *
zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops)
{
inode_timespec_t now;
struct inode *ip;
znode_t *zp;
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
now = current_time(ip);
zp = ITOZ(ip);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_id = id;
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_zn_prefetch = B_FALSE;
zp->z_is_sa = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_TRUE;
zp->z_is_stale = B_FALSE;
zp->z_sa_hdl = NULL;
zp->z_blksz = 0;
zp->z_seq = 0;
zp->z_mapcnt = 0;
zp->z_size = 0;
zp->z_pflags = 0;
zp->z_mode = 0;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
ip->i_generation = 0;
ip->i_ino = id;
ip->i_mode = (S_IFDIR | S_IRWXUGO);
ip->i_uid = SUID_TO_KUID(0);
ip->i_gid = SGID_TO_KGID(0);
ip->i_blkbits = SPA_MINBLOCKSHIFT;
ip->i_atime = now;
ip->i_mtime = now;
ip->i_ctime = now;
ip->i_fop = fops;
ip->i_op = ops;
#if defined(IOP_XATTR)
ip->i_opflags &= ~IOP_XATTR;
#endif
if (insert_inode_locked(ip)) {
unlock_new_inode(ip);
iput(ip);
return (NULL);
}
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
membar_producer();
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
return (ip);
}
/*
* Look up the inode with the given id; it will be allocated if needed.
*/
static struct inode *
zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops)
{
struct inode *ip = NULL;
while (ip == NULL) {
ip = ilookup(zfsvfs->z_sb, (unsigned long)id);
if (ip)
break;
/* May fail due to concurrent zfsctl_inode_alloc() */
ip = zfsctl_inode_alloc(zfsvfs, id, fops, ops);
}
return (ip);
}
/*
* Create the '.zfs' directory. This directory is cached as part of the VFS
* structure. This results in a hold on the zfsvfs_t. The code in zfs_umount()
* therefore checks against a vfs_count of 2 instead of 1. This reference
* is removed when the ctldir is destroyed in the unmount. All other entities
* under the '.zfs' directory are created dynamically as needed.
*
* Because the dynamically created '.zfs' directory entries assume the use
* of 64-bit inode numbers, this support must be disabled on 32-bit systems.
*/
int
zfsctl_create(zfsvfs_t *zfsvfs)
{
ASSERT(zfsvfs->z_ctldir == NULL);
zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
&zpl_fops_root, &zpl_ops_root);
if (zfsvfs->z_ctldir == NULL)
return (SET_ERROR(ENOENT));
return (0);
}
/*
* Destroy the '.zfs' directory or remove a snapshot from zfs_snapshots_by_name.
* Only called when the filesystem is unmounted.
*/
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
if (zfsvfs->z_issnap) {
zfs_snapentry_t *se;
spa_t *spa = zfsvfs->z_os->os_spa;
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_find_by_objsetid(spa, objsetid);
if (se != NULL)
zfsctl_snapshot_remove(se);
rw_exit(&zfs_snapshot_lock);
if (se != NULL) {
zfsctl_snapshot_unmount_cancel(se);
zfsctl_snapshot_rele(se);
}
} else if (zfsvfs->z_ctldir) {
iput(zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
}
/*
* Given a root znode, retrieve the associated .zfs directory.
* Add a hold to the vnode and return it.
*/
struct inode *
zfsctl_root(znode_t *zp)
{
ASSERT(zfs_has_ctldir(zp));
/* Must have an existing ref, so igrab() cannot return NULL */
VERIFY3P(igrab(ZTOZSB(zp)->z_ctldir), !=, NULL);
return (ZTOZSB(zp)->z_ctldir);
}
/*
* Generate a long fid to indicate a snapdir. We encode whether the snapdir
* is already mounted in the gen field. We do this because an nfsd lookup
* will not trigger the automount. The next time nfsd does fh_to_dentry, we
* will notice this, perform the automount, and return ESTALE to force nfsd
* to revalidate and follow the mount.
*/
static int
zfsctl_snapdir_fid(struct inode *ip, fid_t *fidp)
{
zfid_short_t *zfid = (zfid_short_t *)fidp;
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint32_t gen = 0;
uint64_t object;
uint64_t objsetid;
int i;
struct dentry *dentry;
if (fidp->fid_len < LONG_FID_LEN) {
fidp->fid_len = LONG_FID_LEN;
return (SET_ERROR(ENOSPC));
}
object = ip->i_ino;
objsetid = ZFSCTL_INO_SNAPDIRS - ip->i_ino;
zfid->zf_len = LONG_FID_LEN;
dentry = d_obtain_alias(igrab(ip));
if (!IS_ERR(dentry)) {
gen = !!d_mountpoint(dentry);
dput(dentry);
}
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
return (0);
}
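/*
 * Illustrative sketch (added for exposition, not part of this change):
 * decoding the byte-packed fields produced above. The helper name is
 * hypothetical; it simply reverses the per-byte shifts used when the
 * long fid was built.
 */
static void
sketch_snapdir_fid_decode(const zfid_long_t *zlfid, uint64_t *object,
    uint64_t *objsetid, uint32_t *gen)
{
	const zfid_short_t *zfid = (const zfid_short_t *)zlfid;
	int i;

	*object = 0;
	*objsetid = 0;
	*gen = 0;
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		*object |= (uint64_t)zfid->zf_object[i] << (8 * i);
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		*gen |= (uint32_t)zfid->zf_gen[i] << (8 * i);
	for (i = 0; i < sizeof (zlfid->zf_setid); i++)
		*objsetid |= (uint64_t)zlfid->zf_setid[i] << (8 * i);
}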
/*
* Generate an appropriate fid for an entry in the .zfs directory.
*/
int
zfsctl_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int i;
+ int error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (zfsctl_is_snapdir(ip)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (zfsctl_snapdir_fid(ip, fidp));
}
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOSPC));
}
zfid = (zfid_short_t *)fidp;
zfid->zf_len = SHORT_FID_LEN;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* .zfs znodes always have a generation number of 0 */
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Construct a full dataset name in full_name: "pool/dataset@snap_name"
*/
static int
zfsctl_snapshot_name(zfsvfs_t *zfsvfs, const char *snap_name, int len,
char *full_name)
{
objset_t *os = zfsvfs->z_os;
if (zfs_component_namecheck(snap_name, NULL, NULL) != 0)
return (SET_ERROR(EILSEQ));
dmu_objset_name(os, full_name);
if ((strlen(full_name) + 1 + strlen(snap_name)) >= len)
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(full_name, "@");
(void) strcat(full_name, snap_name);
return (0);
}
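/*
 * Illustrative usage (added for exposition, not part of this change):
 * for a filesystem whose objset name is "tank/home" and a snapshot
 * directory entry named "monday", the helper above fills full_name with
 * "tank/home@monday"; it returns EILSEQ for an invalid component or
 * ENAMETOOLONG when the result would not fit. The fragment assumes a
 * zfsvfs in scope and is an example only.
 */
#if 0	/* example only */
	char full_name[ZFS_MAX_DATASET_NAME_LEN];
	int err = zfsctl_snapshot_name(zfsvfs, "monday",
	    sizeof (full_name), full_name);
#endif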
/*
* Returns full path in full_path: "/pool/dataset/.zfs/snapshot/snap_name/"
*/
static int
zfsctl_snapshot_path_objset(zfsvfs_t *zfsvfs, uint64_t objsetid,
int path_len, char *full_path)
{
objset_t *os = zfsvfs->z_os;
fstrans_cookie_t cookie;
char *snapname;
boolean_t case_conflict;
uint64_t id, pos = 0;
int error = 0;
if (zfsvfs->z_vfs->vfs_mntpoint == NULL)
return (SET_ERROR(ENOENT));
cookie = spl_fstrans_mark();
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os,
ZFS_MAX_DATASET_NAME_LEN, snapname, &id, &pos,
&case_conflict);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto out;
if (id == objsetid)
break;
}
snprintf(full_path, path_len, "%s/.zfs/snapshot/%s",
zfsvfs->z_vfs->vfs_mntpoint, snapname);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
spl_fstrans_unmark(cookie);
return (error);
}
/*
* Special case the handling of "..".
*/
int
zfsctl_root_lookup(struct inode *dip, const char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
int error = 0;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (strcmp(name, "..") == 0) {
*ipp = dip->i_sb->s_root->d_inode;
} else if (strcmp(name, ZFS_SNAPDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIR,
&zpl_fops_snapdir, &zpl_ops_snapdir);
} else if (strcmp(name, ZFS_SHAREDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SHARES,
&zpl_fops_shares, &zpl_ops_shares);
} else {
*ipp = NULL;
}
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Lookup entry point for the 'snapshot' directory. Try to open the
* snapshot if it exists, creating the pseudo filesystem inode as necessary.
*/
int
zfsctl_snapdir_lookup(struct inode *dip, const char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
uint64_t id;
int error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
error = dmu_snapshot_lookup(zfsvfs->z_os, name, &id);
if (error) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIRS - id,
&simple_dir_operations, &simple_dir_inode_operations);
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Renaming a directory under '.zfs/snapshot' will automatically trigger
* a rename of the snapshot to the new given name. The rename is confined
* to the '.zfs/snapshot' directory; snapshots cannot be moved elsewhere.
*/
int
zfsctl_snapdir_rename(struct inode *sdip, const char *snm,
struct inode *tdip, const char *tnm, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(sdip);
char *to, *from, *real, *fsname;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
to = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
from = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
snm = real;
} else if (error != ENOTSUP) {
goto out;
}
}
dmu_objset_name(zfsvfs->z_os, fsname);
error = zfsctl_snapshot_name(ITOZSB(sdip), snm,
ZFS_MAX_DATASET_NAME_LEN, from);
if (error == 0)
error = zfsctl_snapshot_name(ITOZSB(tdip), tnm,
ZFS_MAX_DATASET_NAME_LEN, to);
if (error == 0)
error = zfs_secpolicy_rename_perms(from, to, cr);
if (error != 0)
goto out;
/*
* Cannot move snapshots out of the snapdir.
*/
if (sdip != tdip) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* No-op when names are identical.
*/
if (strcmp(snm, tnm) == 0) {
error = 0;
goto out;
}
rw_enter(&zfs_snapshot_lock, RW_WRITER);
error = dsl_dataset_rename_snapshot(fsname, snm, tnm, B_FALSE);
if (error == 0)
(void) zfsctl_snapshot_rename(snm, tnm);
rw_exit(&zfs_snapshot_lock);
out:
kmem_free(from, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(to, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Removing a directory under '.zfs/snapshot' will automatically trigger
* the removal of the snapshot with the given name.
*/
int
zfsctl_snapdir_remove(struct inode *dip, const char *name, cred_t *cr,
int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *snapname, *real;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, name, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
name = real;
} else if (error != ENOTSUP) {
goto out;
}
}
error = zfsctl_snapshot_name(ITOZSB(dip), name,
ZFS_MAX_DATASET_NAME_LEN, snapname);
if (error == 0)
error = zfs_secpolicy_destroy_perms(snapname, cr);
if (error != 0)
goto out;
error = zfsctl_snapshot_unmount(snapname, MNT_FORCE);
if ((error == 0) || (error == ENOENT))
error = dsl_destroy_snapshot(snapname, B_FALSE);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Creating a directory under '.zfs/snapshot' will automatically trigger
* the creation of a new snapshot with the given name.
*/
int
zfsctl_snapdir_mkdir(struct inode *dip, const char *dirname, vattr_t *vap,
struct inode **ipp, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *dsname;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfs_component_namecheck(dirname, NULL, NULL) != 0) {
error = SET_ERROR(EILSEQ);
goto out;
}
dmu_objset_name(zfsvfs->z_os, dsname);
error = zfs_secpolicy_snapshot_perms(dsname, cr);
if (error != 0)
goto out;
if (error == 0) {
error = dmu_objset_snapshot_one(dsname, dirname);
if (error != 0)
goto out;
error = zfsctl_snapdir_lookup(dip, dirname, ipp,
0, cr, NULL, NULL);
}
out:
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
return (error);
}
/*
* Flush everything out of the kernel's export table and such.
* This is needed as once the snapshot is used over NFS, its
* entries in the svc_export and svc_expkey caches hold a reference
* to the snapshot mount point. There is no known way of flushing
* only the entries related to the snapshot.
*/
static void
exportfs_flush(void)
{
char *argv[] = { "/usr/sbin/exportfs", "-f", NULL };
char *envp[] = { NULL };
(void) call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
/*
* Attempt to unmount a snapshot by making a call to user space.
* There is no assurance that this can or will succeed; it is just a
* best effort. In the case where it does fail, perhaps because
* it's in use, the unmount will fail harmlessly.
*/
int
zfsctl_snapshot_unmount(const char *snapname, int flags)
{
char *argv[] = { "/usr/bin/env", "umount", "-t", "zfs", "-n", NULL,
NULL };
char *envp[] = { NULL };
zfs_snapentry_t *se;
int error;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_name(snapname)) == NULL) {
rw_exit(&zfs_snapshot_lock);
return (SET_ERROR(ENOENT));
}
rw_exit(&zfs_snapshot_lock);
exportfs_flush();
if (flags & MNT_FORCE)
argv[4] = "-fn";
argv[5] = se->se_path;
dprintf("unmount; path=%s\n", se->se_path);
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
zfsctl_snapshot_rele(se);
/*
* The umount system utility will return 256 on error. We must
* assume this error is because the file system is busy, so it is
* converted to the more sensible EBUSY.
*/
if (error)
error = SET_ERROR(EBUSY);
return (error);
}
int
zfsctl_snapshot_mount(struct path *path, int flags)
{
struct dentry *dentry = path->dentry;
struct inode *ip = dentry->d_inode;
zfsvfs_t *zfsvfs;
zfsvfs_t *snap_zfsvfs;
zfs_snapentry_t *se;
char *full_name, *full_path;
char *argv[] = { "/usr/bin/env", "mount", "-t", "zfs", "-n", NULL, NULL,
NULL };
char *envp[] = { NULL };
int error;
struct path spath;
if (ip == NULL)
return (SET_ERROR(EISDIR));
zfsvfs = ITOZSB(ip);
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
full_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
full_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_name(zfsvfs, dname(dentry),
ZFS_MAX_DATASET_NAME_LEN, full_name);
if (error)
goto error;
/*
* Construct a mount point path from sb of the ctldir inode and dirent
* name, instead of from d_path(), so that a chroot'd process doesn't fail
* on mount.zfs(8).
*/
snprintf(full_path, MAXPATHLEN, "%s/.zfs/snapshot/%s",
zfsvfs->z_vfs->vfs_mntpoint ? zfsvfs->z_vfs->vfs_mntpoint : "",
dname(dentry));
/*
* Multiple concurrent automounts of a snapshot are never allowed.
* The snapshot may be manually mounted as many times as desired.
*/
if (zfsctl_snapshot_ismounted(full_name)) {
error = 0;
goto error;
}
/*
* Attempt to mount the snapshot from user space. Normally this
* would be done using the vfs_kern_mount() function, however that
* function is marked GPL-only and cannot be used. On error we are
* careful to log the real error to the console and return EISDIR
* to safely abort the automount. This should be very rare.
*
* If the user mode helper happens to return EBUSY, a concurrent
* mount is already in progress in which case the error is ignored.
* Take note that if the program was executed successfully the return
* value from call_usermodehelper() will be (exitcode << 8 + signal).
*/
dprintf("mount; name=%s path=%s\n", full_name, full_path);
argv[5] = full_name;
argv[6] = full_path;
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
if (error) {
if (!(error & MOUNT_BUSY << 8)) {
zfs_dbgmsg("Unable to automount %s error=%d",
full_path, error);
error = SET_ERROR(EISDIR);
} else {
/*
* EBUSY. This could mean a concurrent mount, or that the
* snapshot has already been mounted at a completely
* different place. We return 0 so the VFS will retry. In
* the latter case the VFS will retry several times
* and return ELOOP, which is probably not a very good
* behavior.
*/
error = 0;
}
goto error;
}
/*
* Follow down in to the mounted snapshot and set MNT_SHRINKABLE
* to identify this as an automounted filesystem.
*/
spath = *path;
path_get(&spath);
if (follow_down_one(&spath)) {
snap_zfsvfs = ITOZSB(spath.dentry->d_inode);
snap_zfsvfs->z_parent = zfsvfs;
dentry = spath.dentry;
spath.mnt->mnt_flags |= MNT_SHRINKABLE;
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_alloc(full_name, full_path,
snap_zfsvfs->z_os->os_spa, dmu_objset_id(snap_zfsvfs->z_os),
dentry);
zfsctl_snapshot_add(se);
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
rw_exit(&zfs_snapshot_lock);
}
path_put(&spath);
error:
kmem_free(full_name, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(full_path, MAXPATHLEN);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Get the snapdir inode from fid
*/
int
zfsctl_snapdir_vget(struct super_block *sb, uint64_t objsetid, int gen,
struct inode **ipp)
{
int error;
struct path path;
char *mnt;
struct dentry *dentry;
mnt = kmem_alloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_path_objset(sb->s_fs_info, objsetid,
MAXPATHLEN, mnt);
if (error)
goto out;
/* Trigger automount */
error = -kern_path(mnt, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &path);
if (error)
goto out;
path_put(&path);
/*
* Get the snapdir inode. Note, we don't want to use the above
* path because it contains the root of the snapshot rather
* than the snapdir.
*/
*ipp = ilookup(sb, ZFSCTL_INO_SNAPDIRS - objsetid);
if (*ipp == NULL) {
error = SET_ERROR(ENOENT);
goto out;
}
/* check gen, see zfsctl_snapdir_fid */
dentry = d_obtain_alias(igrab(*ipp));
if (gen != (!IS_ERR(dentry) && d_mountpoint(dentry))) {
iput(*ipp);
*ipp = NULL;
error = SET_ERROR(ENOENT);
}
if (!IS_ERR(dentry))
dput(dentry);
out:
kmem_free(mnt, MAXPATHLEN);
return (error);
}
int
zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
znode_t *zp;
znode_t *dzp;
int error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (zfsvfs->z_shares_dir == 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
error = zfs_lookup(dzp, name, &zp, 0, cr, NULL, NULL);
zrele(dzp);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Initialize the various pieces we'll need to create and manipulate .zfs
* directories. Currently this is unused but available.
*/
void
zfsctl_init(void)
{
avl_create(&zfs_snapshots_by_name, snapentry_compare_by_name,
sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t,
se_node_name));
avl_create(&zfs_snapshots_by_objsetid, snapentry_compare_by_objsetid,
sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t,
se_node_objsetid));
rw_init(&zfs_snapshot_lock, NULL, RW_DEFAULT, NULL);
}
/*
* Clean up the various pieces we needed for .zfs directories. In particular
* ensure the expiry timer is canceled safely.
*/
void
zfsctl_fini(void)
{
avl_destroy(&zfs_snapshots_by_name);
avl_destroy(&zfs_snapshots_by_objsetid);
rw_destroy(&zfs_snapshot_lock);
}
module_param(zfs_admin_snapshot, int, 0644);
MODULE_PARM_DESC(zfs_admin_snapshot, "Enable mkdir/rmdir/mv in .zfs/snapshot");
module_param(zfs_expire_snapshot, int, 0644);
MODULE_PARM_DESC(zfs_expire_snapshot, "Seconds to expire .zfs/snapshot");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index eac3dcb6a55c..64d6b4616e1c 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -1,2202 +1,2206 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
-#include <sys/spa_boot.h>
#include <sys/objlist.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
enum {
TOKEN_RO,
TOKEN_RW,
TOKEN_SETUID,
TOKEN_NOSETUID,
TOKEN_EXEC,
TOKEN_NOEXEC,
TOKEN_DEVICES,
TOKEN_NODEVICES,
TOKEN_DIRXATTR,
TOKEN_SAXATTR,
TOKEN_XATTR,
TOKEN_NOXATTR,
TOKEN_ATIME,
TOKEN_NOATIME,
TOKEN_RELATIME,
TOKEN_NORELATIME,
TOKEN_NBMAND,
TOKEN_NONBMAND,
TOKEN_MNTPOINT,
TOKEN_LAST,
};
static const match_table_t zpl_tokens = {
{ TOKEN_RO, MNTOPT_RO },
{ TOKEN_RW, MNTOPT_RW },
{ TOKEN_SETUID, MNTOPT_SETUID },
{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
{ TOKEN_EXEC, MNTOPT_EXEC },
{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
{ TOKEN_DEVICES, MNTOPT_DEVICES },
{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
{ TOKEN_XATTR, MNTOPT_XATTR },
{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
{ TOKEN_ATIME, MNTOPT_ATIME },
{ TOKEN_NOATIME, MNTOPT_NOATIME },
{ TOKEN_RELATIME, MNTOPT_RELATIME },
{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
{ TOKEN_NBMAND, MNTOPT_NBMAND },
{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
{ TOKEN_LAST, NULL },
};
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
if (vfsp != NULL) {
if (vfsp->vfs_mntpoint != NULL)
kmem_strfree(vfsp->vfs_mntpoint);
kmem_free(vfsp, sizeof (vfs_t));
}
}
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
switch (token) {
case TOKEN_RO:
vfsp->vfs_readonly = B_TRUE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_RW:
vfsp->vfs_readonly = B_FALSE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_SETUID:
vfsp->vfs_setuid = B_TRUE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_NOSETUID:
vfsp->vfs_setuid = B_FALSE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_EXEC:
vfsp->vfs_exec = B_TRUE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_NOEXEC:
vfsp->vfs_exec = B_FALSE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_DEVICES:
vfsp->vfs_devices = B_TRUE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_NODEVICES:
vfsp->vfs_devices = B_FALSE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_DIRXATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_SAXATTR:
vfsp->vfs_xattr = ZFS_XATTR_SA;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_XATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_NOXATTR:
vfsp->vfs_xattr = ZFS_XATTR_OFF;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_ATIME:
vfsp->vfs_atime = B_TRUE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_NOATIME:
vfsp->vfs_atime = B_FALSE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_RELATIME:
vfsp->vfs_relatime = B_TRUE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NORELATIME:
vfsp->vfs_relatime = B_FALSE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NBMAND:
vfsp->vfs_nbmand = B_TRUE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_NONBMAND:
vfsp->vfs_nbmand = B_FALSE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_MNTPOINT:
vfsp->vfs_mntpoint = match_strdup(&args[0]);
if (vfsp->vfs_mntpoint == NULL)
return (SET_ERROR(ENOMEM));
break;
default:
break;
}
return (0);
}
/*
* Parse the raw mntopts and return a vfs_t describing the options.
*/
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
vfs_t *tmp_vfsp;
int error;
tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);
if (mntopts != NULL) {
substring_t args[MAX_OPT_ARGS];
char *tmp_mntopts, *p, *t;
int token;
tmp_mntopts = t = kmem_strdup(mntopts);
if (tmp_mntopts == NULL)
return (SET_ERROR(ENOMEM));
while ((p = strsep(&t, ",")) != NULL) {
if (!*p)
continue;
args[0].to = args[0].from = NULL;
token = match_token(p, zpl_tokens, args);
error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
if (error) {
kmem_strfree(tmp_mntopts);
zfsvfs_vfs_free(tmp_vfsp);
return (error);
}
}
kmem_strfree(tmp_mntopts);
}
*vfsp = tmp_vfsp;
return (0);
}
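/*
 * Reviewer sketch, not part of this change: a hypothetical caller of
 * zfsvfs_parse_options() showing the expected lifecycle of the returned
 * vfs_t. The example_* name and the option string are invented, and the
 * option spellings assume the usual MNTOPT_RO/MNTOPT_NOATIME values of
 * "ro" and "noatime".
 */
#if 0
static int
example_parse_mntopts(void)
{
	vfs_t *vfsp = NULL;
	char opts[] = "ro,noatime";
	int error;

	error = zfsvfs_parse_options(opts, &vfsp);
	if (error != 0)
		return (error);

	/* The vfs_do_* flags record which options were explicitly set. */
	ASSERT(vfsp->vfs_do_readonly && vfsp->vfs_readonly);
	ASSERT(vfsp->vfs_do_atime && !vfsp->vfs_atime);

	zfsvfs_vfs_free(vfsp);
	return (0);
}
#endif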
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
}
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
(void) cr;
zfsvfs_t *zfsvfs = sb->s_fs_info;
/*
* Semantically, the only requirement is that the sync be initiated.
* The DMU syncs out txgs frequently, so there's nothing to do.
*/
if (!wait)
return (0);
if (zfsvfs != NULL) {
/*
* Sync a specific filesystem.
*/
dsl_pool_t *dp;
+ int error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (spa_suspended(dp->dp_spa)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
/*
* Update SB_NOATIME bit in VFS super block. Since atime update is
* determined by atime_needs_update(), atime_needs_update() needs to
* return false if atime is turned off, and not unconditionally return
* false if atime is turned on.
*/
if (newval)
sb->s_flags &= ~SB_NOATIME;
else
sb->s_flags |= SB_NOATIME;
}
static void
relatime_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_relatime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
acltype_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
switch (newval) {
case ZFS_ACLTYPE_NFSV4:
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
break;
case ZFS_ACLTYPE_POSIX:
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
#else
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
default:
break;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval)
sb->s_flags |= SB_RDONLY;
else
sb->s_flags &= ~SB_RDONLY;
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval == TRUE)
sb->s_flags |= SB_MANDLOCK;
else
sb->s_flags &= ~SB_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
int error = 0;
ASSERT(vfsp);
zfsvfs = vfsp->vfs_data;
ASSERT(zfsvfs);
os = zfsvfs->z_os;
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
vfsp->vfs_do_readonly = B_TRUE;
vfsp->vfs_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (vfsp->vfs_do_readonly)
readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
if (vfsp->vfs_do_setuid)
setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
if (vfsp->vfs_do_exec)
exec_changed_cb(zfsvfs, vfsp->vfs_exec);
if (vfsp->vfs_do_devices)
devices_changed_cb(zfsvfs, vfsp->vfs_devices);
if (vfsp->vfs_do_xattr)
xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
if (vfsp->vfs_do_atime)
atime_changed_cb(zfsvfs, vfsp->vfs_atime);
if (vfsp->vfs_do_relatime)
relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
if (vfsp->vfs_do_nbmand)
nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Takes a dataset, a property, a value and that value's setpoint as
* found in the ZAP. Checks if the property has been changed in the vfs.
* If so, val and setpoint will be overwritten with updated content.
* Otherwise, they are left unchanged.
*/
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS)
return (EINVAL);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (zfvp == NULL)
return (ESRCH);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfsp->vfs_do_atime)
tmp = vfsp->vfs_atime;
break;
case ZFS_PROP_RELATIME:
if (vfsp->vfs_do_relatime)
tmp = vfsp->vfs_relatime;
break;
case ZFS_PROP_DEVICES:
if (vfsp->vfs_do_devices)
tmp = vfsp->vfs_devices;
break;
case ZFS_PROP_EXEC:
if (vfsp->vfs_do_exec)
tmp = vfsp->vfs_exec;
break;
case ZFS_PROP_SETUID:
if (vfsp->vfs_do_setuid)
tmp = vfsp->vfs_setuid;
break;
case ZFS_PROP_READONLY:
if (vfsp->vfs_do_readonly)
tmp = vfsp->vfs_readonly;
break;
case ZFS_PROP_XATTR:
if (vfsp->vfs_do_xattr)
tmp = vfsp->vfs_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfsp->vfs_do_nbmand)
tmp = vfsp->vfs_nbmand;
break;
default:
return (ENOENT);
}
if (tmp != *val) {
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
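/*
 * Reviewer sketch, not part of this change: hypothetical use of
 * zfs_get_temporary_prop() to detect a mount-time override of atime.
 * The example_* name and the buffer size are invented; "temporary" is the
 * setpoint string written by the function above when an override exists.
 * On entry *atime holds the on-disk value and is overwritten only if a
 * temporary override is found.
 */
#if 0
static boolean_t
example_atime_overridden(dsl_dataset_t *ds, uint64_t *atime)
{
	char setpoint[ZFS_MAX_DATASET_NAME_LEN] = "";

	if (zfs_get_temporary_prop(ds, ZFS_PROP_ATIME, atime, setpoint) != 0)
		return (B_FALSE);
	return (strcmp(setpoint, "temporary") == 0);
}
#endif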
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printk("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.\n", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if ((error == 0) && (val == ZFS_XATTR_SA))
zfsvfs->z_xattr_sa = B_TRUE;
}
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT(zfsvfs->z_root != 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
return (0);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
- if (error != 0) {
- dmu_objset_disown(os, B_TRUE, zfsvfs);
- }
+
return (error);
}
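/*
 * Reviewer sketch, not part of this change: the ownership contract of
 * zfsvfs_create() as zfs_domount() below relies on it. On success the
 * caller holds the objset and must disown it and free the zfsvfs; on
 * failure nothing is owned. The example_* name is invented.
 */
#if 0
static int
example_create_and_release(const char *osname)
{
	zfsvfs_t *zfsvfs = NULL;
	int error;

	error = zfsvfs_create(osname, B_FALSE, &zfsvfs);
	if (error != 0)
		return (error);		/* nothing to clean up */

	/* ... mount-time setup would normally happen here ... */

	dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
	zfsvfs_free(zfsvfs);
	return (0);
}
#endif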
/*
* Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
* on a failure. Do not pass in a statically allocated zfsvfs.
*/
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_sb = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
ZFS_TEARDOWN_INIT(zfsvfs);
rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (int i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
+ dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
zfsvfs_free(zfsvfs);
return (error);
}
zfsvfs->z_drain_task = TASKQID_INVALID;
zfsvfs->z_draining = B_FALSE;
zfsvfs->z_drain_cancel = B_TRUE;
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
boolean_t readonly = zfs_is_readonly(zfsvfs);
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
if (readonly != 0) {
readonly_changed_cb(zfsvfs, B_FALSE);
} else {
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
}
}
/* restore readonly bit */
if (readonly != 0)
readonly_changed_cb(zfsvfs, B_TRUE);
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i, size = zfsvfs->z_hold_size;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_teardown_inactive_lock);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
zfsvfs_vfs_free(zfsvfs->z_vfs);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
#ifdef HAVE_MLSLABEL
/*
* Check that the hex label string is appropriate for the dataset being
* mounted into the global_zone proper.
*
* Return an error if the hex label string is not default or
* admin_low/admin_high. For admin_low labels, the corresponding
* dataset must be readonly.
*/
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
/* must be readonly */
uint64_t rdonly;
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (SET_ERROR(EACCES));
return (rdonly ? 0 : SET_ERROR(EACCES));
}
return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
static int
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
uint32_t bshift)
{
char buf[20 + DMU_OBJACCT_PREFIX_LEN];
uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
uint64_t quota;
uint64_t used;
int err;
strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
sizeof (buf) - offset, B_FALSE);
if (err)
return (err);
if (zfsvfs->z_projectquota_obj == 0)
goto objs;
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
goto objs;
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf + offset, 8, 1, &used);
if (unlikely(err == ENOENT)) {
uint32_t blksize;
u_longlong_t nblocks;
/*
* Quota accounting is async, so a race is possible here.
* There is at least one object with the given project ID.
*/
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
if (unlikely(zp->z_blksz == 0))
blksize = zfsvfs->z_max_blksz;
used = blksize * nblocks;
} else if (err) {
return (err);
}
statp->f_blocks = quota >> bshift;
statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
statp->f_bavail = statp->f_bfree;
objs:
if (zfsvfs->z_projectobjquota_obj == 0)
return (0);
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
return (0);
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf, 8, 1, &used);
if (unlikely(err == ENOENT)) {
/*
* Quota accounting is async, so a race is possible here.
* There is at least one object with the given project ID.
*/
used = 1;
} else if (err) {
return (err);
}
statp->f_files = quota;
statp->f_ffree = (quota > used) ? (quota - used) : 0;
return (0);
}
int
zfs_statvfs(struct inode *ip, struct kstatfs *statp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int err = 0;
- ZFS_ENTER(zfsvfs);
+ if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (err);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
/*
* The underlying storage pool actually uses multiple block
* sizes. Under Solaris frsize (fragment size) is reported as
* the smallest block size we support, and bsize (block size)
* as the filesystem's maximum block size. Unfortunately,
* under Linux the fragment size and block size are often used
* interchangeably. Thus we are forced to report both of them
* as the filesystem's maximum block size.
*/
statp->f_frsize = zfsvfs->z_max_blksz;
statp->f_bsize = zfsvfs->z_max_blksz;
uint32_t bshift = fls(statp->f_bsize) - 1;
/*
* The following report "total" blocks of various kinds in
* the file system, but reported in terms of f_bsize - the
* "preferred" size.
*/
/* Round up so we never have a filesystem using 0 blocks. */
refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
statp->f_blocks = (refdbytes + availbytes) >> bshift;
statp->f_bfree = availbytes >> bshift;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
statp->f_files = statp->f_ffree + usedobjs;
statp->f_fsid.val[0] = (uint32_t)fsid;
statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
statp->f_type = ZFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
/*
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
memset(statp->f_spare, 0, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {
znode_t *zp = ITOZ(ip);
if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
zpl_is_valid_projid(zp->z_projid))
err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
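/*
 * Reviewer note, not part of this change: a worked example of the block
 * accounting above. Assuming z_max_blksz = 128K, f_bsize = 131072 and
 * bshift = fls(131072) - 1 = 17, so a dataset with refdbytes + availbytes
 * of 1 GiB reports f_blocks = 1073741824 >> 17 = 8192 blocks.
 */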
static int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
znode_t *rootzp;
int error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*ipp = ZTOI(rootzp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Linux kernels older than 3.1 do not support a per-filesystem shrinker.
* To accommodate this we must improvise and manually walk the list of znodes
* attempting to prune dentries in order to be able to drop the inodes.
*
* To avoid scanning the same znodes multiple times they are always rotated
* to the end of the z_all_znodes list. New znodes are inserted at the
* end of the list so we're always scanning the oldest znodes first.
*/
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
znode_t **zp_array, *zp;
int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
int objects = 0;
int i = 0, j = 0;
zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
mutex_enter(&zfsvfs->z_znodes_lock);
while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
if ((i++ > nr_to_scan) || (j >= max_array))
break;
ASSERT(list_link_active(&zp->z_link_node));
list_remove(&zfsvfs->z_all_znodes, zp);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
/* Skip active znodes and .zfs entries */
if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
continue;
if (igrab(ZTOI(zp)) == NULL)
continue;
zp_array[j] = zp;
j++;
}
mutex_exit(&zfsvfs->z_znodes_lock);
for (i = 0; i < j; i++) {
zp = zp_array[i];
ASSERT3P(zp, !=, NULL);
d_prune_aliases(ZTOI(zp));
if (atomic_read(&ZTOI(zp)->i_count) == 1)
objects++;
zrele(zp);
}
kmem_free(zp_array, max_array * sizeof (znode_t *));
return (objects);
}
/*
* The ARC has requested that the filesystem drop entries from the dentry
* and inode caches. This can occur when the ARC needs to free meta data
* blocks but can't because they are all pinned by entries in these caches.
*/
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
int error = 0;
struct shrinker *shrinker = &sb->s_shrink;
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
.gfp_mask = GFP_KERNEL,
};
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
defined(SHRINK_CONTROL_HAS_NID) && \
defined(SHRINKER_NUMA_AWARE)
if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
/*
* reset sc.nr_to_scan, modified by
* scan_objects == super_cache_scan
*/
sc.nr_to_scan = nr_to_scan;
}
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define D_PRUNE_ALIASES_IS_DEFAULT
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif
#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
/*
* Fall back to zfs_prune_aliases if the kernel's per-superblock
* shrinker couldn't free anything, possibly due to the inodes being
* allocated in a different memcg.
*/
if (*objects == 0)
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"pruning, nr_to_scan=%lu objects=%d error=%d\n",
nr_to_scan, *objects, error);
return (error);
}
/*
* Teardown the zfsvfs_t.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
zfs_unlinked_drain_stop_wait(zfsvfs);
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but iputs run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's super block as the
* parent filesystem and all of its snapshots have their
* inode's super block set to the parent's filesystem's
* super block. Note, 'z_parent' is self referential
* for non-snapshots.
*/
shrink_dcache_sb(zfsvfs->z_parent->z_sb);
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
/*
* If we are not unmounting (ie: online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no VFS ops active, and any new VFS ops
* will fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs. We also grab an extra reference to all
* the remaining inodes so that the kernel does not attempt to free
* any inodes of a suspended fs. This can cause deadlocks since the
* zfs_resume_fs() process may involve starting threads, which might
* attempt to free unreferenced inodes to free up memory for the new
* thread.
*/
if (!unmounting) {
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
if (igrab(ZTOI(zp)) != NULL)
zp->z_suspended = B_TRUE;
}
mutex_exit(&zfsvfs->z_znodes_lock);
}
/*
* If we are unmounting, set the unmounted flag and let new VFS ops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other VFS ops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
objset_t *os = zfsvfs->z_os;
boolean_t os_dirty = B_FALSE;
for (int t = 0; t < TXG_SIZE; t++) {
if (dmu_objset_is_dirty(os, t)) {
os_dirty = B_TRUE;
break;
}
}
if (!zfs_is_readonly(zfsvfs) && os_dirty) {
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
}
dmu_objset_evict_dbufs(zfsvfs->z_os);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
vfs_t *vfs = NULL;
int canwrite;
int dataset_visible_zone;
ASSERT(zm);
ASSERT(osname);
dataset_visible_zone = zone_dataset_visible(osname, &canwrite);
/*
* Refuse to mount a filesystem if we are in a namespace and the
* dataset is not visible or writable in that namespace.
*/
if (!INGLOBALZONE(curproc) &&
(!dataset_visible_zone || !canwrite)) {
return (SET_ERROR(EPERM));
}
error = zfsvfs_parse_options(zm->mnt_data, &vfs);
if (error)
return (error);
/*
* If a non-writable filesystem is being mounted without the
* read-only flag, pretend it was set, as done for snapshots.
*/
if (!canwrite)
vfs->vfs_readonly = true;
error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
if (error) {
zfsvfs_vfs_free(vfs);
goto out;
}
if ((error = dsl_prop_get_integer(osname, "recordsize",
&recordsize, NULL))) {
zfsvfs_vfs_free(vfs);
goto out;
}
vfs->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfs;
zfsvfs->z_sb = sb;
sb->s_fs_info = zfsvfs;
sb->s_magic = ZFS_SUPER_MAGIC;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
sb->s_blocksize = recordsize;
sb->s_blocksize_bits = ilog2(recordsize);
error = -zpl_bdi_setup(sb, "zfs");
if (error)
goto out;
sb->s_bdi->ra_pages = 0;
/* Set callback operations for the file system. */
sb->s_op = &zpl_super_operations;
sb->s_xattr = zpl_xattr_handlers;
sb->s_export_op = &zpl_export_operations;
sb->s_d_op = &zpl_dentry_operations;
/* Set features for file system. */
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acltype_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
zfsvfs->z_snap_defer_time = jiffies;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
/* Allocate a root inode for the filesystem. */
error = zfs_root(zfsvfs, &root_inode);
if (error) {
(void) zfs_umount(sb);
goto out;
}
/* Allocate a root dentry for the filesystem */
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL) {
(void) zfs_umount(sb);
error = SET_ERROR(ENOMEM);
goto out;
}
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
if (error) {
if (zfsvfs != NULL) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
/*
* make sure we don't have dangling sb->s_fs_info which
* zfs_preumount will use.
*/
sb->s_fs_info = NULL;
}
return (error);
}
/*
* Called when an unmount is requested and certain sanity checks have
* already passed. At this point no dentries or inodes have been reclaimed
* from their respective caches. We drop the extra reference on the .zfs
* control directory to allow everything to be reclaimed. All snapshots
* must already have been unmounted to reach this point.
*/
void
zfs_preumount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
/* zfsvfs is NULL when zfs_domount fails during mount */
if (zfsvfs) {
zfs_unlinked_drain_stop_wait(zfsvfs);
zfsctl_destroy(sb->s_fs_info);
/*
* Wait for zrele_async before entering evict_inodes in
* generic_shutdown_super. The reason we must finish before
* evict_inodes is that, when lazytime is on or when zfs_purgedir
* calls zfs_zget, zrele would bump i_count from 0 to 1. This
* would race with the i_count check in evict_inodes. This means
* it could destroy the inode while we are still using it.
*
* We wait for two passes. xattr directories in the first pass
* may add xattr entries in zfs_purgedir, so in the second pass
* we wait for them. We don't use taskq_wait here because it is
* a pool wide taskq. Other mounted filesystems can constantly
* do zrele_async and there's no guarantee when taskq will be
* empty.
*/
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
}
}
/*
* Called once all other unmount-related teardown has occurred.
* It is our responsibility to release any remaining infrastructure.
*/
int
zfs_umount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
objset_t *os;
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
os = zfsvfs->z_os;
zpl_bdi_destroy(sb);
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
zfsvfs_free(zfsvfs);
return (0);
}
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
vfs_t *vfsp;
boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
int error;
if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
!(*flags & SB_RDONLY)) {
*flags |= SB_RDONLY;
return (EROFS);
}
error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
if (error)
return (error);
if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
zfs_unregister_callbacks(zfsvfs);
zfsvfs_vfs_free(zfsvfs->z_vfs);
vfsp->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfsp;
if (!issnap)
(void) zfs_register_callbacks(vfsp);
return (error);
}
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
znode_t *zp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*ipp = NULL;
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
return (SET_ERROR(EINVAL));
}
/* LONG_FID_LEN means snapdirs */
if (fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
uint64_t setgen = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
dprintf("snapdir fid: objsetid (%llu) != "
"ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
objsetid, ZFSCTL_INO_SNAPDIRS, object);
return (SET_ERROR(EINVAL));
}
if (fid_gen > 1 || setgen != 0) {
dprintf("snapdir fid: fid_gen (%llu) and setgen "
"(%llu)\n", fid_gen, setgen);
return (SET_ERROR(EINVAL));
}
return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
}
- ZFS_ENTER(zfsvfs);
+ if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (err);
/* A zero fid_gen means we are in the .zfs control directories */
if (fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
*ipp = zfsvfs->z_ctldir;
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
0, kcred, NULL, NULL) == 0);
} else {
/*
* Must have an existing ref, so igrab()
* cannot return NULL
*/
VERIFY3P(igrab(*ipp), !=, NULL);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
/* Don't export xattr stuff */
if (zp->z_pflags & ZFS_XATTR) {
zrele(zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if ((fid_gen == 0) && (zfsvfs->z_root == object))
fid_gen = zp_gen;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
fid_gen);
zrele(zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
*ipp = ZTOI(zp);
if (*ipp)
zfs_znode_update_vfs(ITOZ(*ipp));
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
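/*
 * Reviewer note, not part of this change: the byte loops above rebuild
 * little-endian integers from the fid. For a hypothetical zf_object[] of
 * { 0x04, 0x01, 0x00, ... } the reconstruction is
 *
 *	object = 0x04 << 0 | 0x01 << 8 | 0x00 << 16 | ... = 0x104 = 260,
 *
 * so object 260 is the one handed to zfs_zget().
 */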
/*
* Block out VFS ops and close zfsvfs_t
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
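/*
 * Reviewer sketch, not part of this change: how a rollback or receive
 * path would pair zfs_suspend_fs() with zfs_resume_fs(). The example_*
 * name is invented and error handling is reduced to the essentials; the
 * real callers are the rollback/receive paths.
 */
#if 0
static int
example_suspend_resume(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
	int error;

	error = zfs_suspend_fs(zfsvfs);	/* teardown, dataset stays owned */
	if (error != 0)
		return (error);

	/* ... modify the dataset contents here (rollback/receive) ... */

	return (zfs_resume_fs(zfsvfs, ds));	/* rebuild SA, release VOPs */
}
#endif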
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err, err2;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
/*
* Attempt to re-establish all the active inodes with their
* dbufs. If a zfs_rezget() fails, then we unhash the inode
* and mark it stale. This prevents a collision if a new
* inode/object is created which must use the same inode
* number. The stale inode will be released when the
* VFS prunes the dentry holding the remaining references
* on the stale inode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
err2 = zfs_rezget(zp);
if (err2) {
remove_inode_hash(ZTOI(zp));
zp->z_is_stale = B_TRUE;
}
/* see comment in zfs_suspend_fs() */
if (zp->z_suspended) {
zfs_zrele_async(zp);
zp->z_suspended = B_FALSE;
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
/*
* zfs_suspend_fs() could have interrupted freeing
* of dnodes. We need to restart this freeing so
* that we don't "leak" the space.
*/
zfs_unlinked_drain(zfsvfs);
}
/*
* Most of the time zfs_suspend_fs is used for changing the contents
* of the underlying dataset. ZFS rollback and receive operations
* might create files for which negative dentries are present in
* the cache. Since walking the dcache would require a lot of GPL-only
* code duplication, it's much easier on these rather rare occasions
* just to flush the whole dcache for the given dataset/filesystem.
*/
shrink_dcache_sb(zfsvfs->z_sb);
bail:
if (err != 0)
zfsvfs->z_unmounted = B_TRUE;
/* release the VFS ops */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err != 0) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (zfsvfs->z_os)
(void) zfs_umount(zfsvfs->z_sb);
}
return (err);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_sb);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
/*
* Automounted snapshots rely on periodic revalidation
* to defer their automatic unmounting.
*/
inline void
zfs_exit_fs(zfsvfs_t *zfsvfs)
{
if (!zfsvfs->z_issnap)
return;
if (time_after(jiffies, zfsvfs->z_snap_defer_time +
MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
zfsvfs->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
dmu_objset_id(zfsvfs->z_os),
zfs_expire_snapshot);
}
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY(0 == sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %llu to %llu", zfsvfs->z_version, newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Read a property stored within the master node.
*/
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
uint64_t *cached_copy = NULL;
/*
* Figure out where in the objset_t the cached copy would live, if it
* is available for the requested property.
*/
if (os != NULL) {
switch (prop) {
case ZFS_PROP_VERSION:
cached_copy = &os->os_version;
break;
case ZFS_PROP_NORMALIZE:
cached_copy = &os->os_normalization;
break;
case ZFS_PROP_UTF8ONLY:
cached_copy = &os->os_utf8only;
break;
case ZFS_PROP_CASE:
cached_copy = &os->os_casesensitivity;
break;
default:
break;
}
}
if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
*value = *cached_copy;
return (0);
}
/*
* If the property wasn't cached, look up the file system's value for
* the property. For the version property, we look up a slightly
* different string.
*/
const char *pname;
int error = ENOENT;
if (prop == ZFS_PROP_VERSION)
pname = ZPL_VERSION_STR;
else
pname = zfs_prop_to_name(prop);
if (os != NULL) {
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
}
if (error == ENOENT) {
/* No value set, use the default value */
switch (prop) {
case ZFS_PROP_VERSION:
*value = ZPL_VERSION;
break;
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
*value = 0;
break;
case ZFS_PROP_CASE:
*value = ZFS_CASE_SENSITIVE;
break;
case ZFS_PROP_ACLTYPE:
*value = ZFS_ACLTYPE_OFF;
break;
default:
return (error);
}
error = 0;
}
/*
* If one of the methods for getting the property value above worked,
* copy it into the objset_t's cache.
*/
if (error == 0 && cached_copy != NULL) {
*cached_copy = *value;
}
return (error);
}
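/*
 * Reviewer sketch, not part of this change: hypothetical use of
 * zfs_get_zplprop() to read the ZPL version; the function itself supplies
 * ZPL_VERSION as the default when the master node has no explicit value.
 * The example_* name is invented.
 */
#if 0
static uint64_t
example_zpl_version(objset_t *os)
{
	uint64_t vers = ZPL_VERSION;

	(void) zfs_get_zplprop(os, ZFS_PROP_VERSION, &vers);
	return (vers);
}
#endif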
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_unmounted)
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
/*
* We don't need to do anything here; the devname is always current by
* virtue of zfsvfs->z_sb->s_op->show_devname.
*/
(void) oldname, (void) newname;
}
void
zfs_init(void)
{
zfsctl_init();
zfs_znode_init();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
register_filesystem(&zpl_fs_type);
}
void
zfs_fini(void)
{
/*
* We don't use taskq_wait_outstanding() here because
* zpl_posix_acl_free might queue more work while we wait.
*/
taskq_wait(system_delay_taskq);
taskq_wait(system_taskq);
unregister_filesystem(&zpl_fs_type);
zfs_znode_fini();
zfsctl_fini();
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
index 0b3f7f2501ee..1ff88c121a79 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
@@ -1,4034 +1,4048 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
- * This is done avoiding races using ZFS_ENTER(zfsvfs).
- * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
- * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
+ * This is done avoiding races using zfs_enter(zfsvfs).
+ * A zfs_exit(zfsvfs) is needed before all returns. Any znodes
+ * must be checked with zfs_verify_zp(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) zrele() should always be the last thing except for zil_commit() (if
- * necessary) and ZFS_EXIT(). This is for 3 reasons: First, if it's the
+ * necessary) and zfs_exit(). This is for 3 reasons: First, if it's the
* last reference, the vnode/znode can be freed, so the zp may point to
* freed memory. Second, the last reference will call zfs_zinactive(),
* which may induce a lot of work -- pushing cached pages (which acquires
* range locks) and syncing out cached atime changes. Third,
* zfs_zinactive() may require a new tx, which could deadlock the system
* if you were already holding one. This deadlock occurs because the tx
* currently being operated on prevents a txg from syncing, which
* prevents the new tx from progressing, resulting in a deadlock. If you
* must call zrele() within a tx, use zfs_zrele_async(). Note that iput()
* is a synonym for zrele().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
- * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
+ * If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
- * ZFS_ENTER(zfsvfs); // exit if unmounted
+ * zfs_enter(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* zrele(...); // release held znodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
- * ZFS_EXIT(zfsvfs); // finished in zfs
+ * zfs_exit(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* zrele(...); // release held znodes
* zil_commit(zilog, foid); // synchronous when necessary
- * ZFS_EXIT(zfsvfs); // finished in zfs
+ * zfs_exit(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
+ int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
/* Honor ZFS_APPENDONLY file attribute */
if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & O_APPEND) == 0)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/* Keep a count of the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_inc_32(&zp->z_sync_cnt);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
+ int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
/* Decrement the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_dec_32(&zp->z_sync_cnt);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
uint64_t nbytes;
int64_t off;
void *pb;
off = start & (PAGE_SIZE-1);
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
nbytes = MIN(PAGE_SIZE - off, len);
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
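/*
 * A mapped page covers this range: copy the data just written to
 * the DMU back into the page so the page cache stays coherent
 * with the dmu buffer.
 */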
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
pb + off, DMU_READ_PREFETCH);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
len -= nbytes;
off = 0;
}
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* otherwise we fall back to the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
int64_t start, off;
uint64_t bytes;
int len = nbytes;
int error = 0;
void *pb;
start = uio->uio_loffset;
off = start & (PAGE_SIZE-1);
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
bytes = MIN(PAGE_SIZE - off, len);
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
ASSERT(PageUptodate(pp));
unlock_page(pp);
pb = kmap(pp);
error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
put_page(pp);
} else {
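/*
 * No cached page for this range; read it straight from the DMU
 * through the znode's SA dbuf.
 */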
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
}
len -= bytes;
off = 0;
if (error)
break;
}
return (error);
}
#endif /* _KERNEL */
static unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to
* data - bytes to write
* len - number of bytes to write
* pos - offset to start writing at
*
* OUT: resid - remaining bytes to write
*
* RETURN: 0 if success
* positive error code if failure. EIO is returned
* for a short write when residp isn't provided.
*
* Timestamps:
* zp - ctime|mtime updated if byte count > 0
*/
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *residp)
{
fstrans_cookie_t cookie;
int error;
struct iovec iov;
iov.iov_base = (void *)data;
iov.iov_len = len;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
cookie = spl_fstrans_mark();
error = zfs_write(zp, &uio, 0, kcred);
spl_fstrans_unmark(cookie);
if (error == 0) {
if (residp != NULL)
*residp = zfs_uio_resid(&uio);
else if (zfs_uio_resid(&uio) != 0)
error = SET_ERROR(EIO);
}
return (error);
}
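/*
 * Illustrative zfs_write_simple() caller sketch (hypothetical names,
 * error handling elided); not part of the interface contract:
 *
 *	size_t resid;
 *	error = zfs_write_simple(zp, buf, buflen, offset, &resid);
 *	if (error == 0 && resid != 0)
 *		... only (buflen - resid) bytes were written ...
 *
 * Passing residp == NULL instead turns any short write into EIO.
 */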
static void
zfs_rele_async_task(void *arg)
{
iput(arg);
}
void
zfs_zrele_async(znode_t *zp)
{
struct inode *ip = ZTOI(zp);
objset_t *os = ITOZSB(ip)->z_os;
ASSERT(atomic_read(&ip->i_count) > 0);
ASSERT(os != NULL);
/*
* If decrementing the count would put us at 0, we can't do it inline
* here, because that would be synchronous. Instead, dispatch an iput
* to run later.
*
* For more information on the dangers of a synchronous iput, see the
* header comment of this file.
*/
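/*
 * atomic_add_unless(&i_count, -1, 1) drops the reference only when
 * it is not the last one; if it returns false (the count was 1),
 * the final iput() is handed off to the zrele taskq below.
 */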
if (!atomic_add_unless(&ip->i_count, -1, 1)) {
VERIFY(taskq_dispatch(dsl_pool_zrele_taskq(dmu_objset_pool(os)),
zfs_rele_async_task, ip, TQ_SLEEP) != TASKQID_INVALID);
}
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held inode reference for it.
*
* IN: zdp - znode of directory to search.
* nm - name of entry to lookup.
* flags - LOOKUP_XATTR set if looking for an attribute.
* cr - credentials of caller.
* direntflags - directory lookup flags
* realpnp - returned pathname.
*
* OUT: zpp - znode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
int
zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zdp);
int error = 0;
/*
* Fast path lookup; however, we must skip the DNLC lookup
* for case-folding or normalizing lookups because the
* DNLC code only stores the passed-in name. This means
* creating 'a' and removing 'A' on a case-insensitive
* file system would work, but the DNLC still thinks 'a'
* exists and won't let you create it again on the next
* pass through the fast path.
*/
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (!error) {
*zpp = zdp;
zhold(*zpp);
return (0);
}
return (error);
}
}
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zdp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
+ return (error);
*zpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(zdp, zpp, cr, flags))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Do we have permission to get into attribute directory?
*/
if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
B_TRUE, cr))) {
zrele(*zpp);
*zpp = NULL;
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTDIR));
}
/*
* Check accessibility of directory.
*/
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
error = zfs_dirlook(zdp, nm, zpp, flags, direntflags, realpnp);
if ((error == 0) && (*zpp))
zfs_znode_update_vfs(*zpp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
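/*
 * Illustrative zfs_lookup() caller sketch (hypothetical names, error
 * handling elided): the returned znode is held and must be released
 * by the caller.
 *
 *	znode_t *zp;
 *	error = zfs_lookup(dzp, "name", &zp, 0, cr, NULL, NULL);
 *	if (error == 0) {
 *		... use zp ...
 *		zrele(zp);
 *	}
 */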
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the ip of the created or trunc'd file.
*
* IN: dzp - znode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - file flag.
* vsecp - ACL to be set
*
* OUT: zpp - znode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dzp - ctime|mtime updated if new entry created
* zp - ctime|mtime always, atime if new
*/
int
zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
objset_t *os;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (name == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
top:
*zpp = NULL;
if (*name == '\0') {
/*
* Null component name refers to the directory itself.
*/
zhold(dzp);
zp = dzp;
dl = NULL;
error = 0;
} else {
/* possible igrab(zp) */
int zflg = 0;
if (flag & FIGNORECASE)
zflg |= ZCILOOK;
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL);
if (error) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = SET_ERROR(EISDIR);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if (zp == NULL) {
uint64_t txtype;
uint64_t projid = ZFS_DEFAULT_PROJID;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EINVAL);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
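/*
 * Non-blocking assign, per the locking rules at the top of this
 * file: on ERESTART we drop the dirent lock, wait for the txg to
 * open, and retry from 'top' with TXG_NOTHROTTLE set.
 */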
error = dmu_tx_assign(tx,
(waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
/*
* Since we failed to add the directory entry for it,
* delete the newly created dnode.
*/
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
goto out;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
if (flag & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
} else {
int aflags = (flag & O_APPEND) ? V_APPEND : 0;
if (have_acl)
zfs_acl_ids_free(&acl_ids);
have_acl = B_FALSE;
/*
* A directory entry already exists for this name.
*/
/*
* Can't truncate an existing file if in exclusive mode.
*/
if (excl) {
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Can't open a directory for writing.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
/*
* Verify requested access to file.
*/
if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
goto out;
}
mutex_enter(&dzp->z_lock);
dzp->z_seq++;
mutex_exit(&dzp->z_lock);
/*
* Truncate regular files if requested.
*/
if (S_ISREG(ZTOI(zp)->i_mode) &&
(vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
/* we can't hold any locks when calling zfs_freesp() */
if (dl) {
zfs_dirent_unlock(dl);
dl = NULL;
}
error = zfs_freesp(zp, 0, 0, mode, TRUE);
}
}
out:
if (dl)
zfs_dirent_unlock(dl);
if (error) {
if (zp)
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
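/*
 * Create a tmpfile-style node (O_TMPFILE): the new znode gets no
 * directory entry and is added to the unlinked set via
 * zfs_unlinked_add() before the caller ever sees it.
 */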
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
(void) excl, (void) mode, (void) flag;
znode_t *zp = NULL, *dzp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
objset_t *os;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
uint64_t projid = ZFS_DEFAULT_PROJID;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
os = zfsvfs->z_os;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
top:
*ipp = NULL;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/* Add to unlinked set */
zp->z_unlinked = B_TRUE;
zfs_unlinked_add(zp, tx);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
out:
if (error) {
if (zp)
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
*ipp = ZTOI(zp);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dzp - znode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* flags - case flags.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dzp - ctime|mtime
* ip - ctime (if nlink > 0)
*/
static uint64_t null_xattr = 0;
int
zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
{
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
uint64_t acl_obj, xattr_obj;
uint64_t xattr_obj_unlinked = 0;
uint64_t obj = 0;
uint64_t links;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
boolean_t may_delete_now, delete_now = FALSE;
boolean_t unlinked, toobig = FALSE;
uint64_t txtype;
pathname_t *realnmp = NULL;
pathname_t realnm;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE) {
zflg |= ZCILOOK;
pn_alloc(&realnm);
realnmp = &realnm;
}
top:
xattr_obj = 0;
xzp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, realnmp))) {
if (realnmp)
pn_free(realnmp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EPERM);
goto out;
}
mutex_enter(&zp->z_lock);
may_delete_now = atomic_read(&ZTOI(zp)->i_count) == 1 &&
!(zp->z_is_mapped);
mutex_exit(&zp->z_lock);
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the inode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
obj = zp->z_id;
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (may_delete_now) {
toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
/* if the file is too big, only hold_free a token amount */
dmu_tx_hold_free(tx, zp->z_id, 0,
(toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
}
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
mutex_enter(&zp->z_lock);
if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
mutex_exit(&zp->z_lock);
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(zp);
if (xzp)
zrele(xzp);
goto top;
}
if (realnmp)
pn_free(realnmp);
dmu_tx_abort(tx);
zrele(zp);
if (xzp)
zrele(xzp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
/*
* Hold z_lock so that we can make sure that the ACL obj
* hasn't changed. Could have been deleted due to
* zfs_sa_upgrade().
*/
mutex_enter(&zp->z_lock);
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
delete_now = may_delete_now && !toobig &&
atomic_read(&ZTOI(zp)->i_count) == 1 &&
!(zp->z_is_mapped) && xattr_obj == xattr_obj_unlinked &&
zfs_external_acl(zp) == acl_obj;
}
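/*
 * delete_now means the recheck above found no new holds, mappings,
 * or xattr/ACL changes, so the final zrele() at 'out:' can release
 * the znode directly instead of deferring to the zrele taskq.
 */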
if (delete_now) {
if (xattr_obj_unlinked) {
ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = B_TRUE;
clear_nlink(ZTOI(xzp));
links = 0;
error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx);
ASSERT3U(error, ==, 0);
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
if (zp->z_is_sa)
error = sa_remove(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), tx);
else
error = sa_update(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), &null_xattr,
sizeof (uint64_t), tx);
ASSERT0(error);
}
/*
* Add to the unlinked set because a new reference could be
* taken concurrently resulting in a deferred destruction.
*/
zfs_unlinked_add(zp, tx);
mutex_exit(&zp->z_lock);
} else if (unlinked) {
mutex_exit(&zp->z_lock);
zfs_unlinked_add(zp, tx);
}
txtype = TX_REMOVE;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (realnmp)
pn_free(realnmp);
zfs_dirent_unlock(dl);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
if (delete_now)
zrele(zp);
else
zfs_zrele_async(zp);
if (xzp) {
zfs_znode_update_vfs(xzp);
zfs_zrele_async(xzp);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create a new directory and insert it into dzp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dzp - znode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* flags - case flags.
* vsecp - ACL to be set
*
* OUT: zpp - znode of created directory.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dzp - ctime|mtime updated
* zpp - ctime|mtime|atime updated
*/
int
zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
uint64_t txtype;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t waited = B_FALSE;
ASSERT(S_ISDIR(vap->va_mode));
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (dirname == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
vsecp, &acl_ids)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST which can cause some applications
* to fail.
*/
top:
*zpp = NULL;
if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
NULL, NULL))) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
/*
* Now put new name in parent dir.
*/
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
goto out;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
acl_ids.z_fuidp, vap);
out:
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (error != 0) {
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dzp - znode of directory to remove from.
* name - name of directory to be removed.
* cwd - inode of current working directory.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dzp - ctime|mtime updated
*/
int
zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
int flags)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
zp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
if (!S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
if (zp == cwd) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* Grab a lock on the directory to make sure that no one is
* trying to add (or look up) entries while we are removing it.
*/
rw_enter(&zp->z_name_lock, RW_WRITER);
/*
* Grab a lock on the parent pointer to make sure we play well
* with the treewalk and directory rename code.
*/
rw_enter(&zp->z_parent_lock, RW_WRITER);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(zp);
goto top;
}
dmu_tx_abort(tx);
zrele(zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT,
B_FALSE);
}
dmu_tx_commit(tx);
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
out:
zfs_dirent_unlock(dl);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
zrele(zp);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Read directory entries from the given directory cursor position and emit
* name and position for each entry.
*
* IN: ip - inode of directory to read.
* ctx - directory entry context.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*
* Note that the low 4 bits of the cookie returned by zap are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
int
zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
zap_cursor_t zc;
zap_attribute_t zap;
int error;
uint8_t prefetch;
uint8_t type;
int done = 0;
uint64_t parent;
uint64_t offset; /* must be unsigned; checks for < 1 */
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0)
goto out;
/*
* Quit if directory has been removed (posix)
*/
if (zp->z_unlinked)
goto out;
error = 0;
os = zfsvfs->z_os;
offset = ctx->pos;
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
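/*
 * Offsets 0-2 are reserved for the synthetic '.', '..' and '.zfs'
 * entries emitted below; larger offsets are serialized ZAP cursors.
 */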
/*
* Transform to file-system independent format
*/
while (!done) {
uint64_t objnum;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if (error == ENOENT)
break;
else
goto update;
}
/*
* Allow multiple entries provided the first entry is
* the object id. Non-zpl consumers may safely make
* use of the additional space.
*
* XXX: This should be a feature flag for compatibility
*/
if (zap.za_integer_length != 8 ||
zap.za_num_integers == 0) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld, "
"length = %d, num = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset,
zap.za_integer_length,
(u_longlong_t)zap.za_num_integers);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
}
done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
objnum, type);
if (done)
break;
/* Prefetch znode */
if (prefetch) {
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
}
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
ctx->pos = offset;
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
update:
zap_cursor_fini(&zc);
if (error == ENOENT)
error = 0;
out:
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Get the basic file attributes and place them in the provided kstat
* structure. The inode is assumed to be the authoritative source
* for most of the attributes. However, the znode currently has the
* authoritative atime, blksize, and block count.
*
* IN: ip - inode of file.
*
* OUT: sp - kstat values.
*
* RETURN: 0 (always succeeds)
*/
int
zfs_getattr_fast(struct user_namespace *user_ns, struct inode *ip,
struct kstat *sp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t blksize;
u_longlong_t nblocks;
+ int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
mutex_enter(&zp->z_lock);
zpl_generic_fillattr(user_ns, ip, sp);
/*
* +1 link count for root inode with visible '.zfs' directory.
*/
if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
if (sp->nlink < ZFS_LINK_MAX)
sp->nlink++;
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
sp->blksize = blksize;
sp->blocks = nblocks;
if (unlikely(zp->z_blksz == 0)) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
sp->blksize = zfsvfs->z_max_blksz;
}
mutex_exit(&zp->z_lock);
/*
* Required to prevent NFS client from detecting different inode
* numbers of snapshot root dentry before and after snapshot mount.
*/
if (zfsvfs->z_issnap) {
if (ip->i_sb->s_root->d_inode == ip)
sp->ino = ZFSCTL_INO_SNAPDIRS -
dmu_objset_id(zfsvfs->z_os);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* For the operation of changing a file's user/group/project, we need to
* handle not only the main object that is assigned to the file directly,
* but also the ones that are used by the file via the hidden xattr directory.
*
* Because the xattr directory may contain many EA entries, it may be
* impossible to change all of them within the transaction that changes the
* main object's user/group/project attributes. Instead we change them one
* by one via separate, independent transactions. This is not an ideal
* solution, but we have no better idea yet.
*/
static int
zfs_setattr_dir(znode_t *dzp)
{
struct inode *dxip = ZTOI(dzp);
struct inode *xip = NULL;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
objset_t *os = zfsvfs->z_os;
zap_cursor_t zc;
zap_attribute_t zap;
zfs_dirlock_t *dl;
znode_t *zp = NULL;
dmu_tx_t *tx = NULL;
uint64_t uid, gid;
sa_bulk_attr_t bulk[4];
int count;
int err;
zap_cursor_init(&zc, os, dzp->z_id);
while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
count = 0;
if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
err = ENXIO;
break;
}
err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
ZEXISTS, NULL, NULL);
if (err == ENOENT)
goto next;
if (err)
break;
xip = ZTOI(zp);
if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
zp->z_projid == dzp->z_projid)
goto next;
tx = dmu_tx_create(os);
if (!(zp->z_pflags & ZFS_PROJID))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
break;
mutex_enter(&dzp->z_lock);
if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
xip->i_uid = dxip->i_uid;
uid = zfs_uid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&uid, sizeof (uid));
}
if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
xip->i_gid = dxip->i_gid;
gid = zfs_gid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&gid, sizeof (gid));
}
if (zp->z_projid != dzp->z_projid) {
if (!(zp->z_pflags & ZFS_PROJID)) {
zp->z_pflags |= ZFS_PROJID;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
sizeof (zp->z_pflags));
}
zp->z_projid = dzp->z_projid;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
NULL, &zp->z_projid, sizeof (zp->z_projid));
}
mutex_exit(&dzp->z_lock);
if (likely(count > 0)) {
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
tx = NULL;
if (err != 0 && err != ENOENT)
break;
next:
if (zp) {
zrele(zp);
zp = NULL;
zfs_dirent_unlock(dl);
}
zap_cursor_advance(&zc);
}
if (tx)
dmu_tx_abort(tx);
if (zp) {
zrele(zp);
zfs_dirent_unlock(dl);
}
zap_cursor_fini(&zc);
return (err == ENOENT ? 0 : err);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If ATTR_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime updated, mtime updated if size changed.
*/
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
{
struct inode *ip;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t *tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2], atime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2 = 0;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
boolean_t handle_eadir = B_FALSE;
sa_bulk_attr_t *bulk, *xattr_bulk;
int count = 0, xattr_count = 0, bulks = 8;
if (mask == 0)
return (0);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (err);
ip = ZTOI(zp);
/*
* If this is a xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
if (xoap != NULL && (mask & ATTR_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
}
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & ATTR_XVATTR))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
xva_init(tmpxvattr);
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
/*
* Immutable files can only alter immutable bit and atime
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
err = SET_ERROR(EPERM);
goto out3;
}
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
err = SET_ERROR(EPERM);
goto out3;
}
/*
* Verify the timestamps don't overflow 32 bits.
* ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times greater than 2039. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (ATTR_ATIME | ATTR_MTIME)) {
if (((mask & ATTR_ATIME) &&
TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) &&
TIMESPEC_OVERFLOW(&vap->va_mtime))) {
err = SET_ERROR(EOVERFLOW);
goto out3;
}
}
top:
attrzp = NULL;
aclp = NULL;
/* Can this be moved to before the top label? */
if (zfs_is_readonly(zfsvfs)) {
err = SET_ERROR(EROFS);
goto out3;
}
/*
* First validate permissions
*/
if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
if (err)
goto out3;
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err)
goto out3;
}
if (mask & (ATTR_ATIME|ATTR_MTIME) ||
((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr);
}
if (mask & (ATTR_UID|ATTR_GID)) {
int idmask = (mask & (ATTR_UID|ATTR_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & ATTR_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & ATTR_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both ATTR_UID and ATTR_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (ATTR_UID|ATTR_GID)) &&
take_owner && take_group) ||
((idmask == ATTR_UID) && take_owner) ||
((idmask == ATTR_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
(void) secpolicy_setid_clear(vap, cr);
trim_mask = (mask & (ATTR_UID|ATTR_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
mutex_enter(&zp->z_lock);
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & ATTR_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
* The bits will be restored prior to actually setting
* the attributes so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((!S_ISREG(ip->i_mode) &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
mutex_exit(&zp->z_lock);
err = SET_ERROR(EPERM);
goto out3;
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
mutex_exit(&zp->z_lock);
if (mask & ATTR_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(ip, vap,
&oldva, cr);
if (err)
goto out3;
trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then take ownership
* has been granted or write_acl is present and the user
* has the ability to modify the mode. In that case remove
* UID|GID and/or MODE from the mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
}
err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err)
goto out3;
if (trim_mask)
vap->va_mask |= saved_mask;
}
/*
* secpolicy_vnode_setattr() or taking ownership may have
* changed va_mask
*/
mask = vap->va_mask;
if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
handle_eadir = B_TRUE;
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
if (err)
goto out2;
}
if (mask & ATTR_UID) {
new_kuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_kuid)) {
if (attrzp)
zrele(attrzp);
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & ATTR_GID) {
new_kgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_kgid)) {
if (attrzp)
zrele(attrzp);
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
zrele(attrzp);
err = EDQUOT;
goto out2;
}
}
tx = dmu_tx_create(os);
if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = EPERM;
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
mutex_enter(&zp->z_lock);
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & ATTR_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
* For an existing object that was created on an old system, the
* on-disk layout has no slot for the project ID attribute. But the
* quota accounting logic needs to access the related slots by offset
* directly, so we adjust such old objects' layout to place the
* project ID at a unified, fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&attrzp->z_acl_lock);
mutex_enter(&attrzp->z_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (ATTR_UID|ATTR_GID)) {
if (mask & ATTR_UID) {
ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
new_uid = zfs_uid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
}
}
if (mask & ATTR_GID) {
ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
new_gid = zfs_gid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
}
}
if (!(mask & ATTR_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT(err == 0);
if (attrzp) {
err = zfs_acl_chown_setattr(attrzp);
ASSERT(err == 0);
}
}
if (mask & ATTR_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = ZTOI(zp)->i_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
zp->z_atime_dirty = B_FALSE;
ZFS_TIME_ENCODE(&ip->i_atime, atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, sizeof (atime));
}
if (mask & (ATTR_MTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
ZTOI(zp)->i_mtime = zpl_inode_timestamp_truncate(
vap->va_mtime, ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (mask & (ATTR_CTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
ZTOI(zp)->i_ctime = zpl_inode_timestamp_truncate(vap->va_ctime,
ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
if (attrzp && mask) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
sizeof (ctime));
}
/*
* Do this after setting the timestamps to prevent the timestamp
* update from toggling the bit
*/
if (xoap && (mask & ATTR_XVATTR)) {
/*
* Restore the trimmed-off masks
* so that the return masks can be set for the caller.
*/
if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT(S_ISREG(ip->i_mode));
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
mutex_exit(&zp->z_lock);
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&attrzp->z_acl_lock);
mutex_exit(&attrzp->z_lock);
}
out:
if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT(err2 == 0);
}
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
if (attrzp)
zrele(attrzp);
if (err == ERESTART)
goto top;
} else {
if (count > 0)
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
if (attrzp) {
if (err2 == 0 && handle_eadir)
err2 = zfs_setattr_dir(attrzp);
zrele(attrzp);
}
zfs_znode_update_vfs(zp);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
out3:
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(tmpxvattr, sizeof (xvattr_t));
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
typedef struct zfs_zlock {
krwlock_t *zl_rwlock; /* lock we acquired */
znode_t *zl_znode; /* znode we held */
struct zfs_zlock *zl_next; /* next in list */
} zfs_zlock_t;
/*
* Drop locks and release vnodes that were held by zfs_rename_lock().
*/
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
while ((zl = *zlpp) != NULL) {
if (zl->zl_znode != NULL)
zfs_zrele_async(zl->zl_znode);
rw_exit(zl->zl_rwlock);
*zlpp = zl->zl_next;
kmem_free(zl, sizeof (*zl));
}
}
/*
* Search back through the directory tree, using the ".." entries.
* Lock each directory in the chain to prevent concurrent renames.
* Fail any attempt to move a directory into one of its own descendants.
* XXX - z_parent_lock can overlap with map or grow locks
*/
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
znode_t *zp = tdzp;
uint64_t rootid = ZTOZSB(zp)->z_root;
uint64_t oidp = zp->z_id;
krwlock_t *rwlp = &szp->z_parent_lock;
krw_t rw = RW_WRITER;
/*
* First pass write-locks szp and compares to zp->z_id.
* Later passes read-lock zp and compare to zp->z_parent.
*/
do {
if (!rw_tryenter(rwlp, rw)) {
/*
* Another thread is renaming in this path.
* Note that if we are a WRITER, we don't have any
* parent_locks held yet.
*/
if (rw == RW_READER && zp->z_id > szp->z_id) {
/*
* Drop our locks and restart
*/
zfs_rename_unlock(&zl);
*zlpp = NULL;
zp = tdzp;
oidp = zp->z_id;
rwlp = &szp->z_parent_lock;
rw = RW_WRITER;
continue;
} else {
/*
* Wait for other thread to drop its locks
*/
rw_enter(rwlp, rw);
}
}
zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
zl->zl_rwlock = rwlp;
zl->zl_znode = NULL;
zl->zl_next = *zlpp;
*zlpp = zl;
if (oidp == szp->z_id) /* We're a descendant of szp */
return (SET_ERROR(EINVAL));
if (oidp == rootid) /* We've hit the top */
return (0);
if (rw == RW_READER) { /* i.e. not the first pass */
int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
if (error)
return (error);
zl->zl_znode = zp;
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
&oidp, sizeof (oidp));
rwlp = &zp->z_parent_lock;
rw = RW_READER;
} while (zp->z_id != sdzp->z_id);
return (0);
}
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdzp - Source directory containing the "old entry".
* snm - Old entry name.
* tdzp - Target directory to contain the "new entry".
* tnm - New entry name.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdzp,tdzp - ctime|mtime updated
*/
int
zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
cred_t *cr, int flags)
{
znode_t *szp, *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(sdzp);
zilog_t *zilog;
zfs_dirlock_t *sdl, *tdl;
dmu_tx_t *tx;
zfs_zlock_t *zl;
int cmp, serr, terr;
int error = 0;
int zflg = 0;
boolean_t waited = B_FALSE;
if (snm == NULL || tnm == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(sdzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, sdzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
- ZFS_VERIFY_ZP(tdzp);
+ if ((error = zfs_verify_zp(tdzp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (ZTOI(tdzp)->i_sb != ZTOI(sdzp)->i_sb ||
zfsctl_is_node(ZTOI(tdzp))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
szp = NULL;
tzp = NULL;
zl = NULL;
/*
* This is to prevent the creation of links into attribute space
* by renaming a linked file into or out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Lock source and target directory entries. To prevent deadlock,
* a lock ordering must be defined. We lock the directory with
* the smallest object id first, or if it's a tie, the one with
* the lexically first name.
*/
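/*
* For example, if sdzp has object id 70 and tdzp has object id 50,
* cmp is positive and the target entry (tnm) is locked before the
* source entry (snm) below; equal ids mean both names live in the
* same directory, so the names themselves break the tie.
*/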
if (sdzp->z_id < tdzp->z_id) {
cmp = -1;
} else if (sdzp->z_id > tdzp->z_id) {
cmp = 1;
} else {
/*
* First compare the two name arguments without
* considering any case folding.
*/
int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
ASSERT(error == 0 || !zfsvfs->z_utf8);
if (cmp == 0) {
/*
* POSIX: "If the old argument and the new argument
* both refer to links to the same existing file,
* the rename() function shall return successfully
* and perform no other action."
*/
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* If the file system is case-folding, then we may
* have some more checking to do. A case-folding file
* system either supports mixed case sensitivity
* access or is completely case-insensitive. Note
* that the file system is always case preserving.
*
* In mixed sensitivity mode case sensitive behavior
* is the default. FIGNORECASE must be used to
* explicitly request case insensitive behavior.
*
* If the source and target names provided differ only
* by case (e.g., a request to rename 'tim' to 'Tim'),
* we will treat this as a special case in the
* case-insensitive mode: as long as the source name
* is an exact match, we will allow this to proceed as
* a name-change request.
*/
if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
(zfsvfs->z_case == ZFS_CASE_MIXED &&
flags & FIGNORECASE)) &&
u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
&error) == 0) {
/*
* case preserving rename request, require exact
* name matches
*/
zflg |= ZCIEXACT;
zflg &= ~ZCILOOK;
}
}
/*
* If the source and destination directories are the same, we should
* grab the z_name_lock of that directory only once.
*/
if (sdzp == tdzp) {
zflg |= ZHAVELOCK;
rw_enter(&sdzp->z_name_lock, RW_READER);
}
if (cmp < 0) {
serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
ZEXISTS | zflg, NULL, NULL);
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
} else {
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, zflg, NULL, NULL);
serr = zfs_dirent_lock(&sdl,
sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
NULL, NULL);
}
if (serr) {
/*
* Source entry invalid or not there.
*/
if (!terr) {
zfs_dirent_unlock(tdl);
if (tzp)
zrele(tzp);
}
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(snm, "..") == 0)
serr = EINVAL;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (serr);
}
if (terr) {
zfs_dirent_unlock(sdl);
zrele(szp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(tnm, "..") == 0)
terr = EINVAL;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (terr);
}
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID but also the ZFS_PROJINHERIT flag. In that
* case, we only allow renames into our tree when the project
* IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto out;
if (S_ISDIR(ZTOI(szp)->i_mode)) {
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if (S_ISDIR(ZTOI(szp)->i_mode)) {
if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
} else {
if (S_ISDIR(ZTOI(tzp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
}
/*
* POSIX dictates that when the source and target
* entries refer to the same file object, rename
* must do nothing and exit without error.
*/
if (szp->z_id == tzp->z_id) {
error = 0;
goto out;
}
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(szp);
if (tzp)
zrele(tzp);
goto top;
}
dmu_tx_abort(tx);
zrele(szp);
if (tzp)
zrele(tzp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
if (error == 0) {
error = zfs_link_create(tdl, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
if (tdzp->z_pflags & ZFS_PROJINHERIT)
szp->z_pflags |= ZFS_PROJINHERIT;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME |
(flags & FIGNORECASE ? TX_CI : 0), sdzp,
sdl->dl_name, tdzp, tdl->dl_name, szp);
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY3U(zfs_link_destroy(tdl, szp, tx,
ZRENAMING, NULL), ==, 0);
}
} else {
/*
* If we had removed the existing target, the subsequent
* call to zfs_link_create() to add back the same entry,
* but with the new dnode (szp), should not fail.
*/
ASSERT(tzp == NULL);
}
}
dmu_tx_commit(tx);
out:
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
zfs_znode_update_vfs(sdzp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (sdzp != tdzp)
zfs_znode_update_vfs(tdzp);
zfs_znode_update_vfs(szp);
zrele(szp);
if (tzp) {
zfs_znode_update_vfs(tzp);
zrele(tzp);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dzp - Directory to contain new symbolic link.
* name - Name of directory entry in dip.
* vap - Attributes of new entry.
* link - Name for new symlink entry.
* cr - credentials of caller.
* flags - case flags
*
* OUT: zpp - Znode for new symbolic link.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
*/
int
zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link,
znode_t **zpp, cred_t *cr, int flags)
{
znode_t *zp;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
int zflg = ZNEW;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
boolean_t waited = B_FALSE;
ASSERT(S_ISLNK(vap->va_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(dzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
top:
*zpp = NULL;
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
if (error) {
zfs_acl_ids_free(&acl_ids);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create a new object for the symlink.
* for version 4 ZPL datasets the symlink will be an SA attribute
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
link, len, tx);
else
zfs_sa_symlink(zp, link, len, tx);
mutex_exit(&zp->z_lock);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (error == 0) {
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
} else {
zrele(zp);
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by ip.
*
* IN: ip - inode of symbolic link
* uio - structure to contain the link path.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*/
int
zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
mutex_exit(&zp->z_lock);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Insert a new entry into directory tdzp referencing szp.
*
* IN: tdzp - Directory to contain new entry.
* szp - znode of new entry.
* name - name of new entry.
* cr - credentials of caller.
* flags - case flags.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* tdzp - ctime|mtime updated
* szp - ctime updated
*/
int
zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
int flags)
{
struct inode *sip = ZTOI(szp);
znode_t *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(tdzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uint64_t parent;
uid_t owner;
boolean_t waited = B_FALSE;
boolean_t is_tmpfile = 0;
uint64_t txg;
#ifdef HAVE_TMPFILE
is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
#endif
ASSERT(S_ISDIR(ZTOI(tdzp)->i_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(tdzp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
+ return (error);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (S_ISDIR(sip->i_mode)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
- ZFS_VERIFY_ZP(szp);
+ if ((error = zfs_verify_zp(szp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID but also the ZFS_PROJINHERIT flag. In that
* case, we only allow hard link creation in our tree when the
* project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (sip->i_sb != ZTOI(tdzp)->i_sb || zfsctl_is_node(sip)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
top:
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, tdzp, name, &tzp, zf, NULL, NULL);
if (error) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
if (is_tmpfile)
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
/* unmark z_unlinked so zfs_link_create will not reject */
if (is_tmpfile)
szp->z_unlinked = B_FALSE;
error = zfs_link_create(dl, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
/*
* A tmpfile is created in z_unlinkedobj, so remove it from there.
* Also, we don't log in ZIL, because all previous file
* operations on the tmpfile are ignored by ZIL. Instead we
* always wait for the txg to sync to make sure all previous
* operations are sync safe.
*/
if (is_tmpfile) {
VERIFY(zap_remove_int(zfsvfs->z_os,
zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
} else if (is_tmpfile) {
/* restore z_unlinked since linking failed */
szp->z_unlinked = B_TRUE;
}
txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED)
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
zfs_znode_update_vfs(tdzp);
zfs_znode_update_vfs(szp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
static void
zfs_putpage_sync_commit_cb(void *arg)
{
struct page *pp = arg;
ClearPageError(pp);
end_page_writeback(pp);
}
static void
zfs_putpage_async_commit_cb(void *arg)
{
struct page *pp = arg;
znode_t *zp = ITOZ(pp->mapping->host);
ClearPageError(pp);
end_page_writeback(pp);
atomic_dec_32(&zp->z_async_writes_cnt);
}
/*
* Push a page out to disk, once the page is on stable storage the
* registered commit callback will be run as notification of completion.
*
* IN: ip - page mapped for inode.
* pp - page to push (page is locked)
* wbc - writeback control data
* for_sync - does the caller intend to wait synchronously for the
* page writeback to complete?
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated
*/
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
boolean_t for_sync)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
loff_t offset;
loff_t pgoff;
unsigned int pglen;
dmu_tx_t *tx;
caddr_t va;
int err = 0;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int cnt = 0;
struct address_space *mapping;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (err);
ASSERT(PageLocked(pp));
pgoff = page_offset(pp); /* Page byte-offset in file */
offset = i_size_read(ip); /* File length in bytes */
pglen = MIN(PAGE_SIZE, /* Page length in bytes */
P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
/* Page is beyond end of file */
if (pgoff >= offset) {
unlock_page(pp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/* Truncate page length to end of file */
if (pgoff + pglen > offset)
pglen = offset - pgoff;
#if 0
/*
* FIXME: Allow mmap writes past its quota. The correct fix
* is to register a page_mkwrite() handler to count the page
* against its quota when it is about to be dirtied.
*/
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
KUID_TO_SUID(ip->i_uid)) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
KGID_TO_SGID(ip->i_gid)) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
err = EDQUOT;
}
#endif
/*
* The ordering here is critical and must adhere to the following
* rules in order to avoid deadlocking in either zfs_read() or
* zfs_free_range() due to a lock inversion.
*
* 1) The page must be unlocked prior to acquiring the range lock.
* This is critical because zfs_read() calls find_lock_page()
* which may block on the page lock while holding the range lock.
*
* 2) Before setting or clearing write back on a page the range lock
* must be held in order to prevent a lock inversion with the
* zfs_free_range() function.
*
* This presents a problem because upon entering this function the
* page lock is already held. To safely acquire the range lock the
* page lock must be dropped. This creates a window where another
* process could truncate, invalidate, dirty, or write out the page.
*
* Therefore, after successfully reacquiring the range and page locks
* the current page state is checked. In the common case everything
* will be as is expected and it can be written out. However, if
* the page state has changed it must be handled accordingly.
*/
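/*
* Concretely, the code below redirties and unlocks the page, takes the
* range lock, relocks the page, and then rechecks its state: if the
* mapping changed or the page is no longer dirty we are done, if
* another writer already put it under writeback we optionally wait for
* that, and only otherwise do we clear the dirty bit and write it out.
*/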
mapping = pp->mapping;
redirty_page_for_writepage(wbc, pp);
unlock_page(pp);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
pgoff, pglen, RL_WRITER);
lock_page(pp);
/* Page mapping changed or it was no longer dirty, we're done */
if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
unlock_page(pp);
zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/* Another process started writeback, block if required */
if (PageWriteback(pp)) {
unlock_page(pp);
zfs_rangelock_exit(lr);
if (wbc->sync_mode != WB_SYNC_NONE) {
/*
* Speed up any non-sync page writebacks since
* they may take several seconds to complete.
* Refer to the comment in zpl_fsync() (when
* HAVE_FSYNC_RANGE is defined) for details.
*/
if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
zil_commit(zfsvfs->z_log, zp->z_id);
}
if (PageWriteback(pp))
#ifdef HAVE_PAGEMAP_FOLIO_WAIT_BIT
folio_wait_bit(page_folio(pp), PG_writeback);
#else
wait_on_page_bit(pp, PG_writeback);
#endif
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/* Clear the dirty flag now that the required locks are held */
if (!clear_page_dirty_for_io(pp)) {
unlock_page(pp);
zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Counterpart for redirty_page_for_writepage() above. This page
* was in fact not skipped and should not be counted as if it were.
*/
wbc->pages_skipped--;
if (!for_sync)
atomic_inc_32(&zp->z_async_writes_cnt);
set_page_writeback(pp);
unlock_page(pp);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_NOWAIT);
if (err != 0) {
if (err == ERESTART)
dmu_tx_wait(tx);
dmu_tx_abort(tx);
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
filemap_dirty_folio(page_mapping(pp), page_folio(pp));
#else
__set_page_dirty_nobuffers(pp);
#endif
ClearPageError(pp);
end_page_writeback(pp);
if (!for_sync)
atomic_dec_32(&zp->z_async_writes_cnt);
zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
va = kmap(pp);
ASSERT3U(pglen, <=, PAGE_SIZE);
dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
kunmap(pp);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/* Preserve the mtime and ctime provided by the inode */
ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
zp->z_atime_dirty = B_FALSE;
zp->z_seq++;
err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
for_sync ? zfs_putpage_sync_commit_cb :
zfs_putpage_async_commit_cb, pp);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
if (wbc->sync_mode != WB_SYNC_NONE) {
/*
* Note that this is rarely called under writepages(), because
* writepages() normally handles the entire commit for
* performance reasons.
*/
zil_commit(zfsvfs->z_log, zp->z_id);
} else if (!for_sync && atomic_load_32(&zp->z_sync_writes_cnt) > 0) {
/*
* If the caller does not intend to wait synchronously
* for this page writeback to complete and there are active
* synchronous calls on this file, do a commit so that
* the latter don't accidentally end up waiting for
* our writeback to complete. Refer to the comment in
* zpl_fsync() (when HAVE_FSYNC_RANGE is defined) for details.
*/
zil_commit(zfsvfs->z_log, zp->z_id);
}
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, pglen);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
/*
* Update the system attributes when the inode has been dirtied. For the
* moment we only update the mode, atime, mtime, and ctime.
*/
int
zfs_dirty_inode(struct inode *ip, int flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
dmu_tx_t *tx;
uint64_t mode, atime[2], mtime[2], ctime[2];
sa_bulk_attr_t bulk[4];
int error = 0;
int cnt = 0;
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return (0);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
#ifdef I_DIRTY_TIME
/*
* This is the lazytime semantic introduced in Linux 4.0.
* This flag is only passed in from update_time() when lazytime is set.
* (Note, I_DIRTY_SYNC will also be set if lazytime is not enabled.)
* Fortunately mtime and ctime are managed within ZFS itself, so we
* only need to dirty atime.
*/
if (flags == I_DIRTY_TIME) {
zp->z_atime_dirty = B_TRUE;
goto out;
}
#endif
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
mutex_enter(&zp->z_lock);
zp->z_atime_dirty = B_FALSE;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
/* Preserve the mode, mtime and ctime provided by the inode */
ZFS_TIME_ENCODE(&ip->i_atime, atime);
ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
mode = ip->i_mode;
zp->z_mode = mode;
error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
out:
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
void
zfs_inactive(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t atime[2];
int error;
int need_unlock = 0;
/* Only read lock if we haven't already write locked, e.g. rollback */
if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
need_unlock = 1;
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
}
if (zp->z_sa_hdl == NULL) {
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
return;
}
if (zp->z_atime_dirty && zp->z_unlinked == B_FALSE) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
ZFS_TIME_ENCODE(&ip->i_atime, atime);
mutex_enter(&zp->z_lock);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&atime, sizeof (atime), tx);
zp->z_atime_dirty = B_FALSE;
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
}
}
zfs_zinactive(zp);
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
* Fill pages with data from the disk.
*/
static int
zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
struct page *cur_pp;
u_offset_t io_off, total;
size_t io_len;
loff_t i_size;
unsigned page_idx;
int err;
os = zfsvfs->z_os;
io_len = nr_pages << PAGE_SHIFT;
i_size = i_size_read(ip);
io_off = page_offset(pl[0]);
if (io_off + io_len > i_size)
io_len = i_size - io_off;
/*
* Iterate over list of pages and read each page individually.
*/
page_idx = 0;
for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
caddr_t va;
cur_pp = pl[page_idx++];
va = kmap(cur_pp);
err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
DMU_READ_PREFETCH);
kunmap(cur_pp);
if (err) {
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
}
return (0);
}
/*
* Uses zfs_fillpage to read data from the file and fill the pages.
*
* IN: ip - inode of file to get data from.
* pl - list of pages to read
* nr_pages - number of pages to read
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
int
zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int err;
if (pl == NULL)
return (0);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (err);
err = zfs_fillpage(ip, pl, nr_pages);
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nr_pages*PAGESIZE);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (err);
}
/*
* Check ZFS specific permissions to memory map a section of a file.
*
* IN: ip - inode of the file to mmap
* off - file offset
* addrp - start address in memory region
* len - length of memory region
* vm_flags- address flags
*
* RETURN: 0 if success
* error code if failure
*/
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
unsigned long vm_flags)
{
(void) addrp;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
+ int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if ((vm_flags & VM_WRITE) && (zp->z_pflags &
(ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((vm_flags & (VM_READ | VM_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
if (off < 0 || len > MAXOFFSET_T - off) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENXIO));
}
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: zp - znode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* zp - ctime|mtime updated
*/
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
(void) offset;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if (cmd != F_FREESP) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
- ZFS_ENTER(zfsvfs);
+ if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOSPC));
}
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_verify_zp(zp)) != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
gen = (uint32_t)gen64;
size = SHORT_FID_LEN;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);
/* CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
index ec8f2938598f..1a688687ac4b 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
@@ -1,629 +1,635 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* LLNL-CODE-403049.
* Rewritten for Linux by:
* Rohan Puri <rohan.puri15@gmail.com>
* Brian Behlendorf <behlendorf1@llnl.gov>
*/
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>
#include <sys/dmu.h>
#include <sys/dsl_dataset.h>
#include <sys/zap.h>
/*
* Common open routine. Disallow any write access.
*/
static int
zpl_common_open(struct inode *ip, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE)
return (-EACCES);
return (generic_file_open(ip, filp));
}
/*
* Get root directory contents.
*/
static int
zpl_root_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
int error = 0;
- ZPL_ENTER(zfsvfs);
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (!zpl_dir_emit_dots(filp, ctx))
goto out;
if (ctx->pos == 2) {
if (!zpl_dir_emit(ctx, ZFS_SNAPDIR_NAME,
strlen(ZFS_SNAPDIR_NAME), ZFSCTL_INO_SNAPDIR, DT_DIR))
goto out;
ctx->pos++;
}
if (ctx->pos == 3) {
if (!zpl_dir_emit(ctx, ZFS_SHAREDIR_NAME,
strlen(ZFS_SHAREDIR_NAME), ZFSCTL_INO_SHARES, DT_DIR))
goto out;
ctx->pos++;
}
out:
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_root_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_root_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
/*
* Get root directory attributes.
*/
static int
#ifdef HAVE_USERNS_IOPS_GETATTR
zpl_root_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_root_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
#endif
{
(void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode;
#ifdef HAVE_USERNS_IOPS_GETATTR
#ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, ip, stat);
#else
(void) user_ns;
#endif
#else
generic_fillattr(ip, stat);
#endif
stat->atime = current_time(ip);
return (0);
}
ZPL_GETATTR_WRAPPER(zpl_root_getattr);
static struct dentry *
zpl_root_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags)
{
cred_t *cr = CRED();
struct inode *ip;
int error;
crhold(cr);
error = -zfsctl_root_lookup(dip, dname(dentry), &ip, 0, cr, NULL, NULL);
ASSERT3S(error, <=, 0);
crfree(cr);
if (error) {
if (error == -ENOENT)
return (d_splice_alias(NULL, dentry));
else
return (ERR_PTR(error));
}
return (d_splice_alias(ip, dentry));
}
/*
* The '.zfs' control directory file and inode operations.
*/
const struct file_operations zpl_fops_root = {
.open = zpl_common_open,
.llseek = generic_file_llseek,
.read = generic_read_dir,
#ifdef HAVE_VFS_ITERATE_SHARED
.iterate_shared = zpl_root_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_root_iterate,
#else
.readdir = zpl_root_readdir,
#endif
};
const struct inode_operations zpl_ops_root = {
.lookup = zpl_root_lookup,
.getattr = zpl_root_getattr,
};
static struct vfsmount *
zpl_snapdir_automount(struct path *path)
{
int error;
error = -zfsctl_snapshot_mount(path, 0);
if (error)
return (ERR_PTR(error));
/*
* Rather than returning the new vfsmount for the snapshot we must
* return NULL to indicate a mount collision. This is done because
* the user space mount calls do_add_mount() which adds the vfsmount
* to the name space. If we returned the new mount here it would be
* added again to the vfsmount list resulting in list corruption.
*/
return (NULL);
}
/*
* Negative dentries must always be revalidated so newly created snapshots
* can be detected and automounted. Normal dentries should be kept because
* as of the 3.18 kernel revalidating the mountpoint dentry will result in
* the snapshot being immediately unmounted.
*/
static int
#ifdef HAVE_D_REVALIDATE_NAMEIDATA
zpl_snapdir_revalidate(struct dentry *dentry, struct nameidata *i)
#else
zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags)
#endif
{
return (!!dentry->d_inode);
}
static const dentry_operations_t zpl_dops_snapdirs = {
/*
* Auto mounting of snapshots is only supported for 2.6.37 and
* newer kernels. Prior to this kernel the ops->follow_link()
* callback was used as a hack to trigger the mount. The
* resulting vfsmount was then explicitly grafted in to the
* name space. While it might be possible to add compatibility
* code to accomplish this it would require considerable care.
*/
.d_automount = zpl_snapdir_automount,
.d_revalidate = zpl_snapdir_revalidate,
};
static struct dentry *
zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
unsigned int flags)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
struct inode *ip = NULL;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfsctl_snapdir_lookup(dip, dname(dentry), &ip,
0, cr, NULL, NULL);
ASSERT3S(error, <=, 0);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error && error != -ENOENT)
return (ERR_PTR(error));
ASSERT(error == 0 || ip == NULL);
d_clear_d_op(dentry);
d_set_d_op(dentry, &zpl_dops_snapdirs);
dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
return (d_splice_alias(ip, dentry));
}
static int
zpl_snapdir_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
fstrans_cookie_t cookie;
char snapname[MAXNAMELEN];
boolean_t case_conflict;
uint64_t id, pos;
int error = 0;
- ZPL_ENTER(zfsvfs);
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (error);
cookie = spl_fstrans_mark();
if (!zpl_dir_emit_dots(filp, ctx))
goto out;
/* Start the position at 0 if we already emitted . and .. */
pos = (ctx->pos == 2 ? 0 : ctx->pos);
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = -dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN,
snapname, &id, &pos, &case_conflict);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error)
goto out;
if (!zpl_dir_emit(ctx, snapname, strlen(snapname),
ZFSCTL_INO_SHARES - id, DT_DIR))
goto out;
ctx->pos = pos;
}
out:
spl_fstrans_unmark(cookie);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
if (error == -ENOENT)
return (0);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_snapdir_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_snapdir_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
static int
#ifdef HAVE_IOPS_RENAME_USERNS
zpl_snapdir_rename2(struct user_namespace *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int flags)
#else
zpl_snapdir_rename2(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry, unsigned int flags)
#endif
{
cred_t *cr = CRED();
int error;
/* We probably don't want to support renameat2(2) in ctldir */
if (flags)
return (-EINVAL);
crhold(cr);
error = -zfsctl_snapdir_rename(sdip, dname(sdentry),
tdip, dname(tdentry), cr, 0);
ASSERT3S(error, <=, 0);
crfree(cr);
return (error);
}
#if !defined(HAVE_RENAME_WANTS_FLAGS) && !defined(HAVE_IOPS_RENAME_USERNS)
static int
zpl_snapdir_rename(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry)
{
return (zpl_snapdir_rename2(sdip, sdentry, tdip, tdentry, 0));
}
#endif
static int
zpl_snapdir_rmdir(struct inode *dip, struct dentry *dentry)
{
cred_t *cr = CRED();
int error;
crhold(cr);
error = -zfsctl_snapdir_remove(dip, dname(dentry), cr, 0);
ASSERT3S(error, <=, 0);
crfree(cr);
return (error);
}
static int
#ifdef HAVE_IOPS_MKDIR_USERNS
zpl_snapdir_mkdir(struct user_namespace *user_ns, struct inode *dip,
struct dentry *dentry, umode_t mode)
#else
zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
#endif
{
cred_t *cr = CRED();
vattr_t *vap;
struct inode *ip;
int error;
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(vap, dip, mode | S_IFDIR, cr);
error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0);
if (error == 0) {
d_clear_d_op(dentry);
d_set_d_op(dentry, &zpl_dops_snapdirs);
d_instantiate(dentry, ip);
}
kmem_free(vap, sizeof (vattr_t));
ASSERT3S(error, <=, 0);
crfree(cr);
return (error);
}
/*
* Get snapshot directory attributes.
*/
static int
#ifdef HAVE_USERNS_IOPS_GETATTR
zpl_snapdir_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_snapdir_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
#endif
{
(void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode;
zfsvfs_t *zfsvfs = ITOZSB(ip);
+ int error;
- ZPL_ENTER(zfsvfs);
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (error);
#ifdef HAVE_USERNS_IOPS_GETATTR
#ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, ip, stat);
#else
(void) user_ns;
#endif
#else
generic_fillattr(ip, stat);
#endif
stat->nlink = stat->size = 2;
dsl_dataset_t *ds = dmu_objset_ds(zfsvfs->z_os);
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
uint64_t snap_count;
int err = zap_count(
dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (err != 0) {
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
return (-err);
}
stat->nlink += snap_count;
}
stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
stat->atime = current_time(ip);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
return (0);
}
ZPL_GETATTR_WRAPPER(zpl_snapdir_getattr);
/*
* The '.zfs/snapshot' directory file operations. These mainly control
* generating the list of available snapshots when doing an 'ls' in the
* directory. See zpl_snapdir_readdir().
*/
const struct file_operations zpl_fops_snapdir = {
.open = zpl_common_open,
.llseek = generic_file_llseek,
.read = generic_read_dir,
#ifdef HAVE_VFS_ITERATE_SHARED
.iterate_shared = zpl_snapdir_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_snapdir_iterate,
#else
.readdir = zpl_snapdir_readdir,
#endif
};
/*
* The '.zfs/snapshot' directory inode operations. These mainly control
* creating an inode for a snapshot directory and initializing the needed
* infrastructure to automount the snapshot. See zpl_snapdir_lookup().
*/
const struct inode_operations zpl_ops_snapdir = {
.lookup = zpl_snapdir_lookup,
.getattr = zpl_snapdir_getattr,
#if defined(HAVE_RENAME_WANTS_FLAGS) || defined(HAVE_IOPS_RENAME_USERNS)
.rename = zpl_snapdir_rename2,
#else
.rename = zpl_snapdir_rename,
#endif
.rmdir = zpl_snapdir_rmdir,
.mkdir = zpl_snapdir_mkdir,
};
static struct dentry *
zpl_shares_lookup(struct inode *dip, struct dentry *dentry,
unsigned int flags)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
struct inode *ip = NULL;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfsctl_shares_lookup(dip, dname(dentry), &ip,
0, cr, NULL, NULL);
ASSERT3S(error, <=, 0);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error) {
if (error == -ENOENT)
return (d_splice_alias(NULL, dentry));
else
return (ERR_PTR(error));
}
return (d_splice_alias(ip, dentry));
}
static int
zpl_shares_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
znode_t *dzp;
int error = 0;
- ZPL_ENTER(zfsvfs);
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (error);
cookie = spl_fstrans_mark();
if (zfsvfs->z_shares_dir == 0) {
zpl_dir_emit_dots(filp, ctx);
goto out;
}
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error)
goto out;
crhold(cr);
error = -zfs_readdir(ZTOI(dzp), ctx, cr);
crfree(cr);
iput(ZTOI(dzp));
out:
spl_fstrans_unmark(cookie);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
ASSERT3S(error, <=, 0);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_shares_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_shares_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
static int
#ifdef HAVE_USERNS_IOPS_GETATTR
zpl_shares_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
#endif
{
(void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode;
zfsvfs_t *zfsvfs = ITOZSB(ip);
znode_t *dzp;
int error;
- ZPL_ENTER(zfsvfs);
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (error);
if (zfsvfs->z_shares_dir == 0) {
#ifdef HAVE_USERNS_IOPS_GETATTR
#ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, path->dentry->d_inode, stat);
#else
(void) user_ns;
#endif
#else
generic_fillattr(path->dentry->d_inode, stat);
#endif
stat->nlink = stat->size = 2;
stat->atime = current_time(ip);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
return (0);
}
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error == 0) {
#ifdef HAVE_USERNS_IOPS_GETATTR
#ifdef HAVE_GENERIC_FILLATTR_USERNS
error = -zfs_getattr_fast(user_ns, ZTOI(dzp), stat);
#else
(void) user_ns;
#endif
#else
error = -zfs_getattr_fast(kcred->user_ns, ZTOI(dzp), stat);
#endif
iput(ZTOI(dzp));
}
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
ASSERT3S(error, <=, 0);
return (error);
}
ZPL_GETATTR_WRAPPER(zpl_shares_getattr);
/*
* The '.zfs/shares' directory file operations.
*/
const struct file_operations zpl_fops_shares = {
.open = zpl_common_open,
.llseek = generic_file_llseek,
.read = generic_read_dir,
#ifdef HAVE_VFS_ITERATE_SHARED
.iterate_shared = zpl_shares_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_shares_iterate,
#else
.readdir = zpl_shares_readdir,
#endif
};
/*
* The '.zfs/shares' directory inode operations.
*/
const struct inode_operations zpl_ops_shares = {
.lookup = zpl_shares_lookup,
.getattr = zpl_shares_getattr,
};
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index 43b7fb60a997..25fc6b223297 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -1,1288 +1,1354 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
+#include <linux/fs.h>
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#if defined(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS) || \
defined(HAVE_VFS_FILEMAP_DIRTY_FOLIO)
#include <linux/pagemap.h>
#endif
+#ifdef HAVE_FILE_FADVISE
+#include <linux/fadvise.h>
+#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
#include <linux/writeback.h>
#endif
/*
* When using fallocate(2) to preallocate space, inflate the requested
* capacity check by 10% to account for the required metadata blocks.
*/
static unsigned int zfs_fallocate_reserve_percent = 110;
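/*
* With the default of 110, an fallocate(2) request for 100 MiB is
* checked against roughly 110 MiB of available capacity, for example.
*/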
static int
zpl_open(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
error = generic_file_open(ip, filp);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
if (ITOZ(ip)->z_atime_dirty)
zfs_mark_inode_dirty(ip);
crhold(cr);
error = -zfs_close(ip, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_readdir(file_inode(filp), ctx, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
* Linux 2.6.35 - 3.0 API,
* As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
* redundant. The dentry is still accessible via filp->f_path.dentry,
* and we are guaranteed that filp will never be NULL.
*/
static int
zpl_fsync(struct file *filp, int datasync)
{
struct inode *inode = filp->f_mapping->host;
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(ITOZ(inode), datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif
#elif defined(HAVE_FSYNC_RANGE)
/*
* Linux 3.1 API,
* As of 3.1 the responsibility to call filemap_write_and_wait_range() has
* been pushed down into the .fsync() vfs hook. Additionally, the i_mutex
* lock is no longer held by the caller, for zfs we don't require the lock
* to be held so we don't acquire it.
*/
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
znode_t *zp = ITOZ(inode);
zfsvfs_t *zfsvfs = ITOZSB(inode);
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
/*
* The variables z_sync_writes_cnt and z_async_writes_cnt work in
* tandem so that sync writes can detect if there are any non-sync
* writes going on and vice-versa. The "vice-versa" part of this logic
* is located in zfs_putpage() where non-sync writes check if there are
* any ongoing sync writes. If any sync and non-sync writes overlap,
* we do a commit to complete the non-sync writes since the latter can
* potentially take several seconds to complete and thus block sync
* writes in the upcoming call to filemap_write_and_wait_range().
*/
atomic_inc_32(&zp->z_sync_writes_cnt);
/*
* If the following check does not detect an overlapping non-sync write
* (say because it's just about to start), then it is guaranteed that
* the non-sync write will detect this sync write. This is because we
* always increment z_sync_writes_cnt / z_async_writes_cnt before doing
* the check on z_async_writes_cnt / z_sync_writes_cnt here and in
* zfs_putpage() respectively.
*/
if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
- ZPL_ENTER(zfsvfs);
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0) {
+ atomic_dec_32(&zp->z_sync_writes_cnt);
+ return (error);
+ }
zil_commit(zfsvfs->z_log, zp->z_id);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
}
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
/*
* The sync write is not complete yet but we decrement
* z_sync_writes_cnt since zfs_fsync() increments and decrements
* it internally. If a non-sync write starts just after the decrement
* operation but before we call zfs_fsync(), it may not detect this
* overlapping sync write but it does not matter since we have already
* gone past filemap_write_and_wait_range() and we won't block due to
* the non-sync write.
*/
atomic_dec_32(&zp->z_sync_writes_cnt);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(zp, datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif
#else
#error "Unsupported fops->fsync() implementation"
#endif
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
int flags = 0;
#if defined(IOCB_DSYNC)
if (kiocb->ki_flags & IOCB_DSYNC)
flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
if (kiocb->ki_flags & IOCB_SYNC)
flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
if (kiocb->ki_flags & IOCB_APPEND)
flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
if (kiocb->ki_flags & IOCB_DIRECT)
flags |= O_DIRECT;
#endif
return (flags);
}
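/*
* For instance, a kiocb submitted with IOCB_DSYNC and IOCB_APPEND is
* translated by zfs_io_flags() into O_DSYNC | O_APPEND, which the
* zfs_read()/zfs_write() callers below OR into the file's open flags.
*/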
/*
* If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
* is true. This is needed since datasets with an inherited "relatime"
* property aren't necessarily mounted with the MNT_RELATIME flag (e.g.
* after `zfs set relatime=...`), which is what the VFS relatime test in
* relatime_need_update() is based on.
*/
static inline void
zpl_file_accessed(struct file *filp)
{
struct inode *ip = filp->f_mapping->host;
if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
if (zfs_relatime_need_update(ip))
file_accessed(filp);
} else {
file_accessed(filp);
}
}
#if defined(HAVE_VFS_RW_ITERATE)
/*
* When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
* iovecs, kvevs, bvecs and pipes, plus all the required interfaces to
* manipulate the iov_iter are available. In which case the full iov_iter
* can be attached to the uio and correctly handled in the lower layers.
* Otherwise, for older kernels extract the iovec and pass it instead.
*/
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
#ifdef HAVE_IOV_ITER_TYPE
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#else
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#endif
#endif
}
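/*
 * Entry point for ->read_iter().  The bulk of the work is done by
 * zfs_read(); on success the file position is advanced by the number of
 * bytes read and atime handling is left to zpl_file_accessed().
 */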
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
ssize_t count = iov_iter_count(to);
zfs_uio_t uio;
zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
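/*
 * Run the standard VFS write checks and return the number of writable
 * bytes in *countp.  The generic_write_checks() signature differs across
 * kernel versions, hence the two variants.
 */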
static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
ssize_t ret = generic_write_checks(kiocb, from);
if (ret <= 0)
return (ret);
*countp = ret;
#else
struct file *file = kiocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *ip = mapping->host;
int isblk = S_ISBLK(ip->i_mode);
*countp = iov_iter_count(from);
ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
if (ret)
return (ret);
#endif
return (0);
}
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
zfs_uio_t uio;
size_t count = 0;
ssize_t ret;
ret = zpl_generic_write_checks(kiocb, from, &count);
if (ret)
return (ret);
zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return (ret);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
return (ret);
ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
if (ret)
return (ret);
kiocb->ki_pos = pos;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
if (rw == WRITE)
return (zpl_iter_write(kiocb, iter));
else
return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif
#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
if (rw == WRITE)
return (zpl_aio_write(kiocb, iov, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
const struct iovec *iovp = iov_iter_iovec(iter);
unsigned long nr_segs = iter->nr_segs;
ASSERT3S(pos, ==, kiocb->ki_pos);
if (rw == WRITE)
return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif
#endif /* HAVE_VFS_RW_ITERATE */
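/*
 * llseek handler.  SEEK_HOLE and SEEK_DATA are resolved via zfs_holey()
 * when supported by the kernel; all other whence values are handled by
 * generic_file_llseek().
 */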
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
fstrans_cookie_t cookie;
if (whence == SEEK_DATA || whence == SEEK_HOLE) {
struct inode *ip = filp->f_mapping->host;
loff_t maxbytes = ip->i_sb->s_maxbytes;
loff_t error;
spl_inode_lock_shared(ip);
cookie = spl_fstrans_mark();
error = -zfs_holey(ITOZ(ip), whence, &offset);
spl_fstrans_unmark(cookie);
if (error == 0)
error = lseek_execute(filp, ip, offset, maxbytes);
spl_inode_unlock_shared(ip);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
return (generic_file_llseek(filp, offset, whence));
}
/*
* It's worth taking a moment to describe how mmap is implemented
* for zfs because it differs considerably from other Linux filesystems.
* However, this issue is handled the same way under OpenSolaris.
*
* The issue is that by design zfs bypasses the Linux page cache and
* leaves all caching up to the ARC. This has been shown to work
* well for the common read(2)/write(2) case. However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
* page cache. To handle this we cache mmap'ed files twice, once in
* the ARC and a second time in the page cache. The code is careful
* to keep both copies synchronized.
*
* When a file with an mmap'ed region is written to using write(2)
* both the data in the ARC and existing pages in the page cache
* are updated. For a read(2) data will be read first from the page
 * cache then the ARC if needed. Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
*
* New pages are added to the page cache only via .readpage() which
* is called when the vfs needs to read a page off disk to back the
* virtual memory region. These pages may be modified without
* notifying the ARC and will be written out periodically via
* .writepage(). This will occur due to either a sync or the usual
 * page aging behavior. Note that because a read(2) of a mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
*
 * While this implementation ensures correct behavior it does have
 * some drawbacks. The most obvious of these is that it
 * increases the required memory footprint when accessing mmap'ed
* files. It also adds additional complexity to the code keeping
* both caches synchronized.
*
* Longer term it may be possible to cleanly resolve this wart by
* mapping page cache pages directly on to the ARC buffers. The
* Linux address space operations are flexible enough to allow
* selection of which pages back a particular index. The trick
* would be working out the details of which subsystem is in
* charge, the ARC, the page cache, or both. It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
* rather than a vmalloc'ed region.
*/
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct inode *ip = filp->f_mapping->host;
znode_t *zp = ITOZ(ip);
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
(size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
spl_fstrans_unmark(cookie);
if (error)
return (error);
error = generic_file_mmap(filp, vma);
if (error)
return (error);
mutex_enter(&zp->z_lock);
zp->z_is_mapped = B_TRUE;
mutex_exit(&zp->z_lock);
return (error);
}
/*
* Populate a page with data for the Linux page cache. This function is
* only used to support mmap(2). There will be an identical copy of the
* data in the ARC which is kept up to date via .write() and .writepage().
*/
static inline int
zpl_readpage_common(struct page *pp)
{
struct inode *ip;
struct page *pl[1];
int error = 0;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ip = pp->mapping->host;
pl[0] = pp;
cookie = spl_fstrans_mark();
error = -zfs_getpage(ip, pl, 1);
spl_fstrans_unmark(cookie);
if (error) {
SetPageError(pp);
ClearPageUptodate(pp);
} else {
ClearPageError(pp);
SetPageUptodate(pp);
flush_dcache_page(pp);
}
unlock_page(pp);
return (error);
}
#ifdef HAVE_VFS_READ_FOLIO
static int
zpl_read_folio(struct file *filp, struct folio *folio)
{
return (zpl_readpage_common(&folio->page));
}
#else
static int
zpl_readpage(struct file *filp, struct page *pp)
{
return (zpl_readpage_common(pp));
}
#endif
static int
zpl_readpage_filler(void *data, struct page *pp)
{
return (zpl_readpage_common(pp));
}
/*
* Populate a set of pages with data for the Linux page cache. This
* function will only be called for read ahead and never for demand
* paging. For simplicity, the code relies on read_cache_pages() to
* correctly lock each page for IO and call zpl_readpage().
*/
#ifdef HAVE_VFS_READPAGES
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
#else
static void
zpl_readahead(struct readahead_control *ractl)
{
struct page *page;
while ((page = readahead_page(ractl)) != NULL) {
int ret;
ret = zpl_readpage_filler(NULL, page);
put_page(page);
if (ret)
break;
}
}
#endif
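/*
 * Write a single dirty page to the ARC via zfs_putpage().  Used as the
 * write_cache_pages() callback from zpl_writepages() and directly by
 * zpl_writepage().
 */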
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
boolean_t *for_sync = data;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
cookie = spl_fstrans_mark();
(void) zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
spl_fstrans_unmark(cookie);
return (0);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(mapping->host);
zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
enum writeback_sync_modes sync_mode;
int result;
- ZPL_ENTER(zfsvfs);
+ if ((result = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (result);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
sync_mode = wbc->sync_mode;
/*
* We don't want to run write_cache_pages() in SYNC mode here, because
* that would make putpage() wait for a single page to be committed to
* disk every single time, resulting in atrocious performance. Instead
* we run it once in non-SYNC mode so that the ZIL gets all the data,
* and then we commit it all in one go.
*/
boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, &for_sync);
if (sync_mode != wbc->sync_mode) {
- ZPL_ENTER(zfsvfs);
- ZPL_VERIFY_ZP(zp);
+ if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (result);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
/*
* We need to call write_cache_pages() again (we can't just
* return after the commit) because the previous call in
* non-SYNC mode does not guarantee that we got all the dirty
* pages (see the implementation of write_cache_pages() for
* details). That being said, this is a no-op in most cases.
*/
wbc->sync_mode = sync_mode;
result = write_cache_pages(mapping, wbc, zpl_putpage,
&for_sync);
}
return (result);
}
/*
* Write out dirty pages to the ARC, this function is only required to
* support mmap(2). Mapped pages may be dirtied by memory operations
* which never call .write(). These dirty pages are kept in sync with
* the ARC buffers via this hook.
*/
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);
return (zpl_putpage(pp, wbc, &for_sync));
}
/*
* The flag combination which matches the behavior of zfs_space() is
* FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
* flag was introduced in the 2.6.38 kernel.
*
* The original mode=0 (allocate space) behavior can be reasonably emulated
* by checking if enough space exists and creating a sparse file, as real
* persistent space reservation is not possible due to COW, snapshots, etc.
*/
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
cred_t *cr = CRED();
loff_t olen;
fstrans_cookie_t cookie;
int error = 0;
int test_mode = FALLOC_FL_PUNCH_HOLE;
#ifdef HAVE_FALLOC_FL_ZERO_RANGE
test_mode |= FALLOC_FL_ZERO_RANGE;
#endif
if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
return (-EOPNOTSUPP);
if (offset < 0 || len <= 0)
return (-EINVAL);
spl_inode_lock(ip);
olen = i_size_read(ip);
crhold(cr);
cookie = spl_fstrans_mark();
if (mode & (test_mode)) {
flock64_t bf;
if (mode & FALLOC_FL_KEEP_SIZE) {
if (offset > olen)
goto out_unmark;
if (offset + len > olen)
len = olen - offset;
}
bf.l_type = F_WRLCK;
bf.l_whence = SEEK_SET;
bf.l_start = offset;
bf.l_len = len;
bf.l_pid = 0;
error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
unsigned int percent = zfs_fallocate_reserve_percent;
struct kstatfs statfs;
/* Legacy mode, disable fallocate compatibility. */
if (percent == 0) {
error = -EOPNOTSUPP;
goto out_unmark;
}
/*
* Use zfs_statvfs() instead of dmu_objset_space() since it
* also checks project quota limits, which are relevant here.
*/
error = zfs_statvfs(ip, &statfs);
if (error)
goto out_unmark;
/*
* Shrink available space a bit to account for overhead/races.
* We know the product previously fit into availbytes from
* dmu_objset_space(), so the smaller product will also fit.
*/
if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
error = -ENOSPC;
goto out_unmark;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
}
out_unmark:
spl_fstrans_unmark(cookie);
spl_inode_unlock(ip);
crfree(cr);
return (error);
}
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
return zpl_fallocate_common(file_inode(filp),
mode, offset, len);
}
static int
zpl_ioctl_getversion(struct file *filp, void __user *arg)
{
uint32_t generation = file_inode(filp)->i_generation;
return (copy_to_user(arg, &generation, sizeof (generation)));
}
+#ifdef HAVE_FILE_FADVISE
+static int
+zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
+{
+ struct inode *ip = file_inode(filp);
+ znode_t *zp = ITOZ(ip);
+ zfsvfs_t *zfsvfs = ITOZSB(ip);
+ objset_t *os = zfsvfs->z_os;
+ int error = 0;
+
+ if (S_ISFIFO(ip->i_mode))
+ return (-ESPIPE);
+
+ if (offset < 0 || len < 0)
+ return (-EINVAL);
+
+ if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
+
+ switch (advice) {
+ case POSIX_FADV_SEQUENTIAL:
+ case POSIX_FADV_WILLNEED:
+#ifdef HAVE_GENERIC_FADVISE
+ if (zn_has_cached_data(zp))
+ error = generic_fadvise(filp, offset, len, advice);
+#endif
+ /*
+ * Pass on the caller's size directly, but note that
+ * dmu_prefetch_max will effectively cap it. If there
+ * really is a larger sequential access pattern, perhaps
+ * dmu_zfetch will detect it.
+ */
+ if (len == 0)
+ len = i_size_read(ip) - offset;
+
+ dmu_prefetch(os, zp->z_id, 0, offset, len,
+ ZIO_PRIORITY_ASYNC_READ);
+ break;
+ case POSIX_FADV_NORMAL:
+ case POSIX_FADV_RANDOM:
+ case POSIX_FADV_DONTNEED:
+ case POSIX_FADV_NOREUSE:
+ /* ignored for now */
+ break;
+ default:
+ error = -EINVAL;
+ break;
+ }
+
+ zfs_exit(zfsvfs, FTAG);
+
+ return (error);
+}
+#endif /* HAVE_FILE_FADVISE */
+
#define ZFS_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define ZFS_FL_USER_MODIFIABLE (FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)
static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
uint32_t ioctl_flags = 0;
if (zfs_flags & ZFS_IMMUTABLE)
ioctl_flags |= FS_IMMUTABLE_FL;
if (zfs_flags & ZFS_APPENDONLY)
ioctl_flags |= FS_APPEND_FL;
if (zfs_flags & ZFS_NODUMP)
ioctl_flags |= FS_NODUMP_FL;
if (zfs_flags & ZFS_PROJINHERIT)
ioctl_flags |= ZFS_PROJINHERIT_FL;
return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}
/*
* Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
* attributes common to both Linux and Solaris are mapped.
*/
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
uint32_t flags;
int err;
flags = __zpl_ioctl_getflags(file_inode(filp));
err = copy_to_user(arg, &flags, sizeof (flags));
return (err);
}
/*
* fchange() is a helper macro to detect if we have been asked to change a
* flag. This is ugly, but the requirement that we do this is a consequence of
* how the Linux file attribute interface was designed. Another consequence is
* that concurrent modification of files suffers from a TOCTOU race. Neither
* are things we can fix without modifying the kernel-userland interface, which
* is outside of our jurisdiction.
*/
#define fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
xoptattr_t *xoap;
if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
ZFS_PROJINHERIT_FL))
return (-EOPNOTSUPP);
if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
return (-EACCES);
if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
!capable(CAP_LINUX_IMMUTABLE))
return (-EPERM);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EACCES);
xva_init(xva);
xoap = xva_getxoptattr(xva);
#define FLAG_CHANGE(iflag, zflag, xflag, xfield) do { \
if (((ioctl_flags & (iflag)) && !(zfs_flags & (zflag))) || \
((zfs_flags & (zflag)) && !(ioctl_flags & (iflag)))) { \
XVA_SET_REQ(xva, (xflag)); \
(xfield) = ((ioctl_flags & (iflag)) != 0); \
} \
} while (0)
FLAG_CHANGE(FS_IMMUTABLE_FL, ZFS_IMMUTABLE, XAT_IMMUTABLE,
xoap->xoa_immutable);
FLAG_CHANGE(FS_APPEND_FL, ZFS_APPENDONLY, XAT_APPENDONLY,
xoap->xoa_appendonly);
FLAG_CHANGE(FS_NODUMP_FL, ZFS_NODUMP, XAT_NODUMP,
xoap->xoa_nodump);
FLAG_CHANGE(ZFS_PROJINHERIT_FL, ZFS_PROJINHERIT, XAT_PROJINHERIT,
xoap->xoa_projinherit);
#undef FLAG_CHANGE
return (0);
}
static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint32_t flags;
cred_t *cr = CRED();
xvattr_t xva;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&flags, arg, sizeof (flags)))
return (-EFAULT);
err = __zpl_ioctl_setflags(ip, flags, &xva);
if (err)
return (err);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
zfsxattr_t fsx = { 0 };
struct inode *ip = file_inode(filp);
int err;
fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
fsx.fsx_projid = ITOZ(ip)->z_projid;
err = copy_to_user(arg, &fsx, sizeof (fsx));
return (err);
}
static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
zfsxattr_t fsx;
cred_t *cr = CRED();
xvattr_t xva;
xoptattr_t *xoap;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&fsx, arg, sizeof (fsx)))
return (-EFAULT);
if (!zpl_is_valid_projid(fsx.fsx_projid))
return (-EINVAL);
err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
if (err)
return (err);
xoap = xva_getxoptattr(&xva);
XVA_SET_REQ(&xva, XAT_PROJID);
xoap->xoa_projid = fsx.fsx_projid;
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
/*
* Expose Additional File Level Attributes of ZFS.
*/
static int
zpl_ioctl_getdosflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint64_t dosflags = ITOZ(ip)->z_pflags;
dosflags &= ZFS_DOS_FL_USER_VISIBLE;
int err = copy_to_user(arg, &dosflags, sizeof (dosflags));
return (err);
}
static int
__zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
xoptattr_t *xoap;
if (ioctl_flags & (~ZFS_DOS_FL_USER_VISIBLE))
return (-EOPNOTSUPP);
if ((fchange(ioctl_flags, zfs_flags, ZFS_IMMUTABLE, ZFS_IMMUTABLE) ||
fchange(ioctl_flags, zfs_flags, ZFS_APPENDONLY, ZFS_APPENDONLY)) &&
!capable(CAP_LINUX_IMMUTABLE))
return (-EPERM);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EACCES);
xva_init(xva);
xoap = xva_getxoptattr(xva);
#define FLAG_CHANGE(iflag, xflag, xfield) do { \
if (((ioctl_flags & (iflag)) && !(zfs_flags & (iflag))) || \
((zfs_flags & (iflag)) && !(ioctl_flags & (iflag)))) { \
XVA_SET_REQ(xva, (xflag)); \
(xfield) = ((ioctl_flags & (iflag)) != 0); \
} \
} while (0)
FLAG_CHANGE(ZFS_IMMUTABLE, XAT_IMMUTABLE, xoap->xoa_immutable);
FLAG_CHANGE(ZFS_APPENDONLY, XAT_APPENDONLY, xoap->xoa_appendonly);
FLAG_CHANGE(ZFS_NODUMP, XAT_NODUMP, xoap->xoa_nodump);
FLAG_CHANGE(ZFS_READONLY, XAT_READONLY, xoap->xoa_readonly);
FLAG_CHANGE(ZFS_HIDDEN, XAT_HIDDEN, xoap->xoa_hidden);
FLAG_CHANGE(ZFS_SYSTEM, XAT_SYSTEM, xoap->xoa_system);
FLAG_CHANGE(ZFS_ARCHIVE, XAT_ARCHIVE, xoap->xoa_archive);
FLAG_CHANGE(ZFS_NOUNLINK, XAT_NOUNLINK, xoap->xoa_nounlink);
FLAG_CHANGE(ZFS_REPARSE, XAT_REPARSE, xoap->xoa_reparse);
FLAG_CHANGE(ZFS_OFFLINE, XAT_OFFLINE, xoap->xoa_offline);
FLAG_CHANGE(ZFS_SPARSE, XAT_SPARSE, xoap->xoa_sparse);
#undef FLAG_CHANGE
return (0);
}
/*
* Set Additional File Level Attributes of ZFS.
*/
static int
zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint64_t dosflags;
cred_t *cr = CRED();
xvattr_t xva;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&dosflags, arg, sizeof (dosflags)))
return (-EFAULT);
err = __zpl_ioctl_setdosflags(ip, dosflags, &xva);
if (err)
return (err);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
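/*
 * Dispatch the supported file ioctls to their handlers.  Unrecognized
 * commands are rejected with -ENOTTY.
 */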
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC_GETVERSION:
return (zpl_ioctl_getversion(filp, (void *)arg));
case FS_IOC_GETFLAGS:
return (zpl_ioctl_getflags(filp, (void *)arg));
case FS_IOC_SETFLAGS:
return (zpl_ioctl_setflags(filp, (void *)arg));
case ZFS_IOC_FSGETXATTR:
return (zpl_ioctl_getxattr(filp, (void *)arg));
case ZFS_IOC_FSSETXATTR:
return (zpl_ioctl_setxattr(filp, (void *)arg));
case ZFS_IOC_GETDOSFLAGS:
return (zpl_ioctl_getdosflags(filp, (void *)arg));
case ZFS_IOC_SETDOSFLAGS:
return (zpl_ioctl_setdosflags(filp, (void *)arg));
default:
return (-ENOTTY);
}
}
#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
break;
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
default:
return (-ENOTTY);
}
return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */
const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_READPAGES
.readpages = zpl_readpages,
#else
.readahead = zpl_readahead,
#endif
#ifdef HAVE_VFS_READ_FOLIO
.read_folio = zpl_read_folio,
#else
.readpage = zpl_readpage,
#endif
.writepage = zpl_writepage,
.writepages = zpl_writepages,
.direct_IO = zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
.set_page_dirty = __set_page_dirty_nobuffers,
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
.dirty_folio = filemap_dirty_folio,
#endif
};
const struct file_operations zpl_file_operations = {
.open = zpl_open,
.release = zpl_release,
.llseek = zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
.read = new_sync_read,
.write = new_sync_write,
#endif
.read_iter = zpl_iter_read,
.write_iter = zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
#endif
#else
.read = do_sync_read,
.write = do_sync_write,
.aio_read = zpl_aio_read,
.aio_write = zpl_aio_write,
#endif
.mmap = zpl_mmap,
.fsync = zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
.aio_fsync = zpl_aio_fsync,
#endif
.fallocate = zpl_fallocate,
+#ifdef HAVE_FILE_FADVISE
+ .fadvise = zpl_fadvise,
+#endif
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
const struct file_operations zpl_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
.iterate_shared = zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_iterate,
#else
.readdir = zpl_readdir,
#endif
.fsync = zpl_fsync,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
/* CSTYLED */
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
"Percentage of length to use for the available capacity check");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
index cf879a2897b3..e3945a2a05fe 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
@@ -1,378 +1,380 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
*/
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>
static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
struct inode *ip;
VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
inode_set_iversion(ip, 1);
return (ip);
}
static void
zpl_inode_destroy(struct inode *ip)
{
ASSERT(atomic_read(&ip->i_count) == 0);
zfs_inode_destroy(ip);
}
/*
* Called from __mark_inode_dirty() to reflect that something in the
* inode has changed. We use it to ensure the znode system attributes
 * are always strictly up to date with respect to the inode.
*/
#ifdef HAVE_DIRTY_INODE_WITH_FLAGS
static void
zpl_dirty_inode(struct inode *ip, int flags)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
zfs_dirty_inode(ip, flags);
spl_fstrans_unmark(cookie);
}
#else
static void
zpl_dirty_inode(struct inode *ip)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
zfs_dirty_inode(ip, 0);
spl_fstrans_unmark(cookie);
}
#endif /* HAVE_DIRTY_INODE_WITH_FLAGS */
/*
* When ->drop_inode() is called its return value indicates if the
* inode should be evicted from the inode cache. If the inode is
* unhashed and has no links the default policy is to evict it
* immediately.
*
* The ->evict_inode() callback must minimally truncate the inode pages,
* and call clear_inode(). For 2.6.35 and later kernels this will
* simply update the inode state, with the sync occurring before the
* truncate in evict(). For earlier kernels clear_inode() maps to
* end_writeback() which is responsible for completing all outstanding
* write back. In either case, once this is done it is safe to cleanup
 * any remaining inode specific data via zfs_inactive().
*/
static void
zpl_evict_inode(struct inode *ip)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
truncate_setsize(ip, 0);
clear_inode(ip);
zfs_inactive(ip);
spl_fstrans_unmark(cookie);
}
static void
zpl_put_super(struct super_block *sb)
{
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_umount(sb);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
}
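/*
 * Write back dirty filesystem data via zfs_sync().  The 'wait' argument
 * indicates whether the caller requires the sync to complete before
 * returning.
 */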
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
int error;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_sync(sb, wait, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_statvfs(dentry->d_inode, statp);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
/*
* If required by a 32-bit system call, dynamically scale the
* block size up to 16MiB and decrease the block counts. This
* allows for a maximum size of 64EiB to be reported. The file
* counts must be artificially capped at 2^32-1.
*/
if (unlikely(zpl_is_32bit_api())) {
while (statp->f_blocks > UINT32_MAX &&
statp->f_bsize < SPA_MAXBLOCKSIZE) {
statp->f_frsize <<= 1;
statp->f_bsize <<= 1;
statp->f_blocks >>= 1;
statp->f_bfree >>= 1;
statp->f_bavail >>= 1;
}
uint64_t usedobjs = statp->f_files - statp->f_ffree;
statp->f_ffree = MIN(statp->f_ffree, UINT32_MAX - usedobjs);
statp->f_files = statp->f_ffree + usedobjs;
}
return (error);
}
static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data };
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_remount(sb, flags, &zm);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
return (error);
}
static int
__zpl_show_devname(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
- ZPL_ENTER(zfsvfs);
+ int error;
+ if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
+ return (error);
char *fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dmu_objset_name(zfsvfs->z_os, fsname);
for (int i = 0; fsname[i] != 0; i++) {
/*
* Spaces in the dataset name must be converted to their
* octal escape sequence for getmntent(3) to correctly
 * parse the fsname portion of /proc/self/mounts.
*/
if (fsname[i] == ' ') {
seq_puts(seq, "\\040");
} else {
seq_putc(seq, fsname[i]);
}
}
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
return (0);
}
static int
zpl_show_devname(struct seq_file *seq, struct dentry *root)
{
return (__zpl_show_devname(seq, root->d_sb->s_fs_info));
}
static int
__zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
seq_printf(seq, ",%s",
zfsvfs->z_flags & ZSB_XATTR ? "xattr" : "noxattr");
#ifdef CONFIG_FS_POSIX_ACL
switch (zfsvfs->z_acl_type) {
case ZFS_ACLTYPE_POSIX:
seq_puts(seq, ",posixacl");
break;
default:
seq_puts(seq, ",noacl");
break;
}
#endif /* CONFIG_FS_POSIX_ACL */
switch (zfsvfs->z_case) {
case ZFS_CASE_SENSITIVE:
seq_puts(seq, ",casesensitive");
break;
case ZFS_CASE_INSENSITIVE:
seq_puts(seq, ",caseinsensitive");
break;
default:
seq_puts(seq, ",casemixed");
break;
}
return (0);
}
static int
zpl_show_options(struct seq_file *seq, struct dentry *root)
{
return (__zpl_show_options(seq, root->d_sb->s_fs_info));
}
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
zfs_mnt_t *zm = (zfs_mnt_t *)data;
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_domount(sb, zm, silent);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
return (error);
}
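/*
 * Comparison callback passed to sget().  An existing superblock matches
 * only if it is already backed by the objset being mounted.
 */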
static int
zpl_test_super(struct super_block *s, void *data)
{
zfsvfs_t *zfsvfs = s->s_fs_info;
objset_t *os = data;
if (zfsvfs == NULL)
return (0);
return (os == zfsvfs->z_os);
}
static struct super_block *
zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm)
{
struct super_block *s;
objset_t *os;
int err;
err = dmu_objset_hold(zm->mnt_osname, FTAG, &os);
if (err)
return (ERR_PTR(-err));
/*
* The dsl pool lock must be released prior to calling sget().
* It is possible sget() may block on the lock in grab_super()
* while deactivate_super() holds that same lock and waits for
* a txg sync. If the dsl_pool lock is held over sget()
* this can prevent the pool sync and cause a deadlock.
*/
dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
dsl_pool_rele(dmu_objset_pool(os), FTAG);
s = sget(fs_type, zpl_test_super, set_anon_super, flags, os);
dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
dsl_dataset_rele(dmu_objset_ds(os), FTAG);
if (IS_ERR(s))
return (ERR_CAST(s));
if (s->s_root == NULL) {
err = zpl_fill_super(s, zm, flags & SB_SILENT ? 1 : 0);
if (err) {
deactivate_locked_super(s);
return (ERR_PTR(err));
}
s->s_flags |= SB_ACTIVE;
} else if ((flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
return (ERR_PTR(-EBUSY));
}
return (s);
}
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
const char *osname, void *data)
{
zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };
struct super_block *sb = zpl_mount_impl(fs_type, flags, &zm);
if (IS_ERR(sb))
return (ERR_CAST(sb));
return (dget(sb->s_root));
}
static void
zpl_kill_sb(struct super_block *sb)
{
zfs_preumount(sb);
kill_anon_super(sb);
}
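/*
 * Prune cached dentries and inodes associated with this superblock by
 * asking zfs_prune() to scan up to nr_to_scan objects.
 */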
void
zpl_prune_sb(int64_t nr_to_scan, void *arg)
{
struct super_block *sb = (struct super_block *)arg;
int objects = 0;
(void) -zfs_prune(sb, nr_to_scan, &objects);
}
const struct super_operations zpl_super_operations = {
.alloc_inode = zpl_inode_alloc,
.destroy_inode = zpl_inode_destroy,
.dirty_inode = zpl_dirty_inode,
.write_inode = NULL,
.evict_inode = zpl_evict_inode,
.put_super = zpl_put_super,
.sync_fs = zpl_sync_fs,
.statfs = zpl_statfs,
.remount_fs = zpl_remount_fs,
.show_devname = zpl_show_devname,
.show_options = zpl_show_options,
.show_stats = NULL,
};
struct file_system_type zpl_fs_type = {
.owner = THIS_MODULE,
.name = ZFS_DRIVER,
.fs_flags = FS_USERNS_MOUNT,
.mount = zpl_mount,
.kill_sb = zpl_kill_sb,
};
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
index e7e299dcf1cd..a010667adfa8 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
@@ -1,1602 +1,1605 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
*
* Extended attributes (xattr) on Solaris are implemented as files
* which exist in a hidden xattr directory. These extended attributes
* can be accessed using the attropen() system call which opens
* the extended attribute. It can then be manipulated just like
* a standard file descriptor. This has a couple advantages such
* as practically no size limit on the file, and the extended
* attributes permissions may differ from those of the parent file.
* This interface is really quite clever, but it's also completely
* different than what is supported on Linux. It also comes with a
* steep performance penalty when accessing small xattrs because they
* are not stored with the parent file.
*
* Under Linux extended attributes are manipulated by the system
* calls getxattr(2), setxattr(2), and listxattr(2). They consider
* extended attributes to be name/value pairs where the name is a
* NULL terminated string. The name must also include one of the
* following namespace prefixes:
*
* user - No restrictions and is available to user applications.
* trusted - Restricted to kernel and root (CAP_SYS_ADMIN) use.
* system - Used for access control lists (system.nfs4_acl, etc).
 * security - Used by SELinux to store a file's security context.
*
 * The value under Linux is limited to 65536 bytes of binary data.
* In practice, individual xattrs tend to be much smaller than this
* and are typically less than 100 bytes. A good example of this
* are the security.selinux xattrs which are less than 100 bytes and
* exist for every file when xattr labeling is enabled.
*
* The Linux xattr implementation has been written to take advantage of
* this typical usage. When the dataset property 'xattr=sa' is set,
* then xattrs will be preferentially stored as System Attributes (SA).
* This allows tiny xattrs (~100 bytes) to be stored with the dnode and
* up to 64k of xattrs to be stored in the spill block. If additional
* xattr space is required, which is unlikely under Linux, they will
* be stored using the traditional directory approach.
*
* This optimization results in roughly a 3x performance improvement
* when accessing xattrs because it avoids the need to perform a seek
* for every xattr value. When multiple xattrs are stored per-file
* the performance improvements are even greater because all of the
* xattrs stored in the spill block will be cached.
*
* However, by default SA based xattrs are disabled in the Linux port
* to maximize compatibility with other implementations. If you do
* enable SA based xattrs then they will not be visible on platforms
* which do not support this feature.
*
* NOTE: One additional consequence of the xattr directory implementation
* is that when an extended attribute is manipulated an inode is created.
* This inode will exist in the Linux inode cache but there will be no
* associated entry in the dentry cache which references it. This is
* safe but it may result in some confusion. Enabling SA based xattrs
* largely avoids the issue except in the overflow case.
*/
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zap.h>
#include <sys/vfs.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
enum xattr_permission {
XAPERM_DENY,
XAPERM_ALLOW,
XAPERM_COMPAT,
};
typedef struct xattr_filldir {
size_t size;
size_t offset;
char *buf;
struct dentry *dentry;
} xattr_filldir_t;
static enum xattr_permission zpl_xattr_permission(xattr_filldir_t *,
const char *, int);
static int zfs_xattr_compat = 0;
/*
 * Determine if a given xattr name should be visible and if so copy it
 * into the provided buffer (xf->buf).
*/
static int
zpl_xattr_filldir(xattr_filldir_t *xf, const char *name, int name_len)
{
enum xattr_permission perm;
/* Check permissions using the per-namespace list xattr handler. */
perm = zpl_xattr_permission(xf, name, name_len);
if (perm == XAPERM_DENY)
return (0);
/* Prefix the name with "user." if it does not have a namespace. */
if (perm == XAPERM_COMPAT) {
if (xf->buf) {
if (xf->offset + XATTR_USER_PREFIX_LEN + 1 > xf->size)
return (-ERANGE);
memcpy(xf->buf + xf->offset, XATTR_USER_PREFIX,
XATTR_USER_PREFIX_LEN);
xf->buf[xf->offset + XATTR_USER_PREFIX_LEN] = '\0';
}
xf->offset += XATTR_USER_PREFIX_LEN;
}
/* When xf->buf is NULL only calculate the required size. */
if (xf->buf) {
if (xf->offset + name_len + 1 > xf->size)
return (-ERANGE);
memcpy(xf->buf + xf->offset, name, name_len);
xf->buf[xf->offset + name_len] = '\0';
}
xf->offset += (name_len + 1);
return (0);
}
/*
 * Read as many directory entry names as will fit into the provided buffer,
* or when no buffer is provided calculate the required buffer size.
*/
static int
zpl_xattr_readdir(struct inode *dxip, xattr_filldir_t *xf)
{
zap_cursor_t zc;
zap_attribute_t zap;
int error;
zap_cursor_init(&zc, ITOZSB(dxip)->z_os, ITOZ(dxip)->z_id);
while ((error = -zap_cursor_retrieve(&zc, &zap)) == 0) {
if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
error = -ENXIO;
break;
}
error = zpl_xattr_filldir(xf, zap.za_name, strlen(zap.za_name));
if (error)
break;
zap_cursor_advance(&zc);
}
zap_cursor_fini(&zc);
if (error == -ENOENT)
error = 0;
return (error);
}
static ssize_t
zpl_xattr_list_dir(xattr_filldir_t *xf, cred_t *cr)
{
struct inode *ip = xf->dentry->d_inode;
struct inode *dxip = NULL;
znode_t *dxzp;
int error;
/* Lookup the xattr directory */
error = -zfs_lookup(ITOZ(ip), NULL, &dxzp, LOOKUP_XATTR,
cr, NULL, NULL);
if (error) {
if (error == -ENOENT)
error = 0;
return (error);
}
dxip = ZTOI(dxzp);
error = zpl_xattr_readdir(dxip, xf);
iput(dxip);
return (error);
}
static ssize_t
zpl_xattr_list_sa(xattr_filldir_t *xf)
{
znode_t *zp = ITOZ(xf->dentry->d_inode);
nvpair_t *nvp = NULL;
int error = 0;
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = -zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
return (error);
ASSERT(zp->z_xattr_cached);
while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
error = zpl_xattr_filldir(xf, nvpair_name(nvp),
strlen(nvpair_name(nvp)));
if (error)
return (error);
}
return (0);
}
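/*
 * List all visible xattr names for the inode.  Names stored as System
 * Attributes are collected first, followed by those in the hidden xattr
 * directory.  When no buffer is supplied only the required size is
 * returned.
 */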
ssize_t
zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
znode_t *zp = ITOZ(dentry->d_inode);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
xattr_filldir_t xf = { buffer_size, 0, buffer, dentry };
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int error = 0;
crhold(cr);
cookie = spl_fstrans_mark();
- ZPL_ENTER(zfsvfs);
- ZPL_VERIFY_ZP(zp);
+ if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ goto out1;
rw_enter(&zp->z_xattr_lock, RW_READER);
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_list_sa(&xf);
if (error)
goto out;
}
error = zpl_xattr_list_dir(&xf, cr);
if (error)
goto out;
error = xf.offset;
out:
rw_exit(&zp->z_xattr_lock);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
+out1:
spl_fstrans_unmark(cookie);
crfree(cr);
return (error);
}
static int
zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
size_t size, cred_t *cr)
{
fstrans_cookie_t cookie;
struct inode *xip = NULL;
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
int error;
/* Lookup the xattr directory */
error = -zfs_lookup(ITOZ(ip), NULL, &dxzp, LOOKUP_XATTR,
cr, NULL, NULL);
if (error)
goto out;
/* Lookup a specific xattr name in the directory */
error = -zfs_lookup(dxzp, (char *)name, &xzp, 0, cr, NULL, NULL);
if (error)
goto out;
xip = ZTOI(xzp);
if (!size) {
error = i_size_read(xip);
goto out;
}
if (size < i_size_read(xip)) {
error = -ERANGE;
goto out;
}
struct iovec iov;
iov.iov_base = (void *)value;
iov.iov_len = size;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0);
cookie = spl_fstrans_mark();
error = -zfs_read(ITOZ(xip), &uio, 0, cr);
spl_fstrans_unmark(cookie);
if (error == 0)
error = size - zfs_uio_resid(&uio);
out:
if (xzp)
zrele(xzp);
if (dxzp)
zrele(dxzp);
return (error);
}
static int
zpl_xattr_get_sa(struct inode *ip, const char *name, void *value, size_t size)
{
znode_t *zp = ITOZ(ip);
uchar_t *nv_value;
uint_t nv_size;
int error = 0;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = -zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
return (error);
ASSERT(zp->z_xattr_cached);
error = -nvlist_lookup_byte_array(zp->z_xattr_cached, name,
&nv_value, &nv_size);
if (error)
return (error);
if (size == 0 || value == NULL)
return (nv_size);
if (size < nv_size)
return (-ERANGE);
memcpy(value, nv_value, nv_size);
return (nv_size);
}
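/*
 * Common xattr lookup helper.  When SA based xattrs are enabled the
 * System Attribute copy is checked first, then the xattr directory.
 * A missing xattr is reported as -ENODATA.
 */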
static int
__zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size,
cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_get_sa(ip, name, value, size);
if (error != -ENOENT)
goto out;
}
error = zpl_xattr_get_dir(ip, name, value, size, cr);
out:
if (error == -ENOENT)
error = -ENODATA;
return (error);
}
#define XATTR_NOENT 0x0
#define XATTR_IN_SA 0x1
#define XATTR_IN_DIR 0x2
/* check where the xattr resides */
static int
__zpl_xattr_where(struct inode *ip, const char *name, int *where, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ASSERT(where);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
*where = XATTR_NOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_get_sa(ip, name, NULL, 0);
if (error >= 0)
*where |= XATTR_IN_SA;
else if (error != -ENOENT)
return (error);
}
error = zpl_xattr_get_dir(ip, name, NULL, 0, cr);
if (error >= 0)
*where |= XATTR_IN_DIR;
else if (error != -ENOENT)
return (error);
if (*where == (XATTR_IN_SA|XATTR_IN_DIR))
cmn_err(CE_WARN, "ZFS: inode %p has xattr \"%s\""
" in both SA and dir", ip, name);
if (*where == XATTR_NOENT)
error = -ENODATA;
else
error = 0;
return (error);
}
static int
zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
- ZPL_ENTER(zfsvfs);
- ZPL_VERIFY_ZP(zp);
+ if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ goto out;
rw_enter(&zp->z_xattr_lock, RW_READER);
error = __zpl_xattr_get(ip, name, value, size, cr);
rw_exit(&zp->z_xattr_lock);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
+out:
spl_fstrans_unmark(cookie);
crfree(cr);
return (error);
}
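/*
 * Create, update, or (when value is NULL) remove an xattr stored as a
 * file in the hidden xattr directory.
 */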
static int
zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
size_t size, int flags, cred_t *cr)
{
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
vattr_t *vap = NULL;
int lookup_flags, error;
const int xattr_mode = S_IFREG | 0644;
loff_t pos = 0;
/*
* Lookup the xattr directory. When we're adding an entry pass
* CREATE_XATTR_DIR to ensure the xattr directory is created.
* When removing an entry this flag is not passed to avoid
* unnecessarily creating a new xattr directory.
*/
lookup_flags = LOOKUP_XATTR;
if (value != NULL)
lookup_flags |= CREATE_XATTR_DIR;
error = -zfs_lookup(ITOZ(ip), NULL, &dxzp, lookup_flags,
cr, NULL, NULL);
if (error)
goto out;
/* Lookup a specific xattr name in the directory */
error = -zfs_lookup(dxzp, (char *)name, &xzp, 0, cr, NULL, NULL);
if (error && (error != -ENOENT))
goto out;
error = 0;
/* Remove a specific name xattr when value is set to NULL. */
if (value == NULL) {
if (xzp)
error = -zfs_remove(dxzp, (char *)name, cr, 0);
goto out;
}
/* Lookup failed create a new xattr. */
if (xzp == NULL) {
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
vap->va_mode = xattr_mode;
vap->va_mask = ATTR_MODE;
vap->va_uid = crgetuid(cr);
vap->va_gid = crgetgid(cr);
error = -zfs_create(dxzp, (char *)name, vap, 0, 0644, &xzp,
cr, 0, NULL);
if (error)
goto out;
}
ASSERT(xzp != NULL);
error = -zfs_freesp(xzp, 0, 0, xattr_mode, TRUE);
if (error)
goto out;
error = -zfs_write_simple(xzp, value, size, pos, NULL);
out:
if (error == 0) {
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
}
if (vap)
kmem_free(vap, sizeof (vattr_t));
if (xzp)
zrele(xzp);
if (dxzp)
zrele(dxzp);
if (error == -ENOENT)
error = -ENODATA;
ASSERT3S(error, <=, 0);
return (error);
}
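/*
 * Create, update, or (when value is NULL) remove an xattr stored as a
 * System Attribute.  The cached nvlist is modified and then written back
 * via zfs_sa_set_xattr().
 */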
static int
zpl_xattr_set_sa(struct inode *ip, const char *name, const void *value,
size_t size, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
nvlist_t *nvl;
size_t sa_size;
int error = 0;
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = -zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
return (error);
ASSERT(zp->z_xattr_cached);
nvl = zp->z_xattr_cached;
if (value == NULL) {
error = -nvlist_remove(nvl, name, DATA_TYPE_BYTE_ARRAY);
if (error == -ENOENT)
error = zpl_xattr_set_dir(ip, name, NULL, 0, flags, cr);
} else {
/* Limited to 32k to keep nvpair memory allocations small */
if (size > DXATTR_MAX_ENTRY_SIZE)
return (-EFBIG);
/* Prevent the DXATTR SA from consuming the entire SA region */
error = -nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error)
return (error);
if (sa_size > DXATTR_MAX_SA_SIZE)
return (-EFBIG);
error = -nvlist_add_byte_array(nvl, name,
(uchar_t *)value, size);
}
/*
* Update the SA for additions, modifications, and removals. On
 * error drop the inconsistent cached version of the nvlist; it
* will be reconstructed from the ARC when next accessed.
*/
if (error == 0)
error = -zfs_sa_set_xattr(zp, name, value, size);
if (error) {
nvlist_free(nvl);
zp->z_xattr_cached = NULL;
}
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_xattr_set(struct inode *ip, const char *name, const void *value,
size_t size, int flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int where;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
- ZPL_ENTER(zfsvfs);
- ZPL_VERIFY_ZP(zp);
+ if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ goto out1;
rw_enter(&zp->z_xattr_lock, RW_WRITER);
/*
* Before setting the xattr check to see if it already exists.
* This is done to ensure the following optional flags are honored.
*
* XATTR_CREATE: fail if xattr already exists
* XATTR_REPLACE: fail if xattr does not exist
*
* We also want to know if it resides in sa or dir, so we can make
 * sure we don't end up with duplicates in both places.
*/
error = __zpl_xattr_where(ip, name, &where, cr);
if (error < 0) {
if (error != -ENODATA)
goto out;
if (flags & XATTR_REPLACE)
goto out;
/* The xattr to be removed already doesn't exist */
error = 0;
if (value == NULL)
goto out;
} else {
error = -EEXIST;
if (flags & XATTR_CREATE)
goto out;
}
/* Preferentially store the xattr as a SA for better performance */
if (zfsvfs->z_use_sa && zp->z_is_sa &&
(zfsvfs->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) {
error = zpl_xattr_set_sa(ip, name, value, size, flags, cr);
if (error == 0) {
/*
* Successfully put into SA, we need to clear the one
* in dir.
*/
if (where & XATTR_IN_DIR)
zpl_xattr_set_dir(ip, name, NULL, 0, 0, cr);
goto out;
}
}
error = zpl_xattr_set_dir(ip, name, value, size, flags, cr);
/*
* Successfully put into dir, we need to clear the one in SA.
*/
if (error == 0 && (where & XATTR_IN_SA))
zpl_xattr_set_sa(ip, name, NULL, 0, 0, cr);
out:
rw_exit(&zp->z_xattr_lock);
- ZPL_EXIT(zfsvfs);
+ zpl_exit(zfsvfs, FTAG);
+out1:
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
/*
* Extended user attributes
*
* "Extended user attributes may be assigned to files and directories for
* storing arbitrary additional information such as the mime type,
* character set or encoding of a file. The access permissions for user
* attributes are defined by the file permission bits: read permission
 * is required to retrieve the attribute value, and write permission is
* required to change it.
*
* The file permission bits of regular files and directories are
* interpreted differently from the file permission bits of special
* files and symbolic links. For regular files and directories the file
* permission bits define access to the file's contents, while for
* device special files they define access to the device described by
* the special file. The file permissions of symbolic links are not
* used in access checks. These differences would allow users to
* consume filesystem resources in a way not controllable by disk quotas
* for group or world writable special files and directories.
*
* For this reason, extended user attributes are allowed only for
* regular files and directories, and access to extended user attributes
* is restricted to the owner and to users with appropriate capabilities
* for directories with the sticky bit set (see the chmod(1) manual page
* for an explanation of the sticky bit)." - xattr(7)
*
* ZFS allows extended user attributes to be disabled administratively
* by setting the 'xattr=off' property on the dataset.
*/
static int
__zpl_xattr_user_list(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
return (ITOZSB(ip)->z_flags & ZSB_XATTR);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_user_list);
static int
__zpl_xattr_user_get(struct inode *ip, const char *name,
void *value, size_t size)
{
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
if (ZFS_XA_NS_PREFIX_FORBIDDEN(name))
return (-EINVAL);
if (!(ITOZSB(ip)->z_flags & ZSB_XATTR))
return (-EOPNOTSUPP);
/*
* Try to look up the name with the namespace prefix first for
* compatibility with xattrs from this platform. If that fails,
* try again without the namespace prefix for compatibility with
* other platforms.
*/
char *xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name);
error = zpl_xattr_get(ip, xattr_name, value, size);
kmem_strfree(xattr_name);
if (error == -ENODATA)
error = zpl_xattr_get(ip, name, value, size);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_user_get);
static int
__zpl_xattr_user_set(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
int error = 0;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
if (ZFS_XA_NS_PREFIX_FORBIDDEN(name))
return (-EINVAL);
if (!(ITOZSB(ip)->z_flags & ZSB_XATTR))
return (-EOPNOTSUPP);
/*
* Remove alternate compat version of the xattr so we only set the
* version specified by the zfs_xattr_compat tunable.
*
* The following flags must be handled correctly:
*
* XATTR_CREATE: fail if xattr already exists
* XATTR_REPLACE: fail if xattr does not exist
*/
char *prefixed_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name);
const char *clear_name, *set_name;
if (zfs_xattr_compat) {
clear_name = prefixed_name;
set_name = name;
} else {
clear_name = name;
set_name = prefixed_name;
}
/*
* Clear the old value with the alternative name format, if it exists.
*/
error = zpl_xattr_set(ip, clear_name, NULL, 0, flags);
/*
* XATTR_CREATE was specified and we failed to clear the xattr
* because it already exists. Stop here.
*/
if (error == -EEXIST)
goto out;
/*
 * If XATTR_REPLACE was specified and we succeeded in clearing
* an xattr, we don't need to replace anything when setting
* the new value. If we failed with -ENODATA that's fine,
* there was nothing to be cleared and we can ignore the error.
*/
if (error == 0)
flags &= ~XATTR_REPLACE;
/*
* Set the new value with the configured name format.
*/
error = zpl_xattr_set(ip, set_name, value, size, flags);
out:
kmem_strfree(prefixed_name);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_user_set);
static xattr_handler_t zpl_xattr_user_handler =
{
.prefix = XATTR_USER_PREFIX,
.list = zpl_xattr_user_list,
.get = zpl_xattr_user_get,
.set = zpl_xattr_user_set,
};
/*
* Trusted extended attributes
*
* "Trusted extended attributes are visible and accessible only to
* processes that have the CAP_SYS_ADMIN capability. Attributes in this
* class are used to implement mechanisms in user space (i.e., outside
* the kernel) which keep information in extended attributes to which
* ordinary processes should not have access." - xattr(7)
*/
static int
__zpl_xattr_trusted_list(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
return (capable(CAP_SYS_ADMIN));
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_trusted_list);
static int
__zpl_xattr_trusted_get(struct inode *ip, const char *name,
void *value, size_t size)
{
char *xattr_name;
int error;
if (!capable(CAP_SYS_ADMIN))
return (-EACCES);
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name);
error = zpl_xattr_get(ip, xattr_name, value, size);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_trusted_get);
static int
__zpl_xattr_trusted_set(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
char *xattr_name;
int error;
if (!capable(CAP_SYS_ADMIN))
return (-EACCES);
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name);
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_trusted_set);
static xattr_handler_t zpl_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.list = zpl_xattr_trusted_list,
.get = zpl_xattr_trusted_get,
.set = zpl_xattr_trusted_set,
};
/*
* Extended security attributes
*
* "The security attribute namespace is used by kernel security modules,
* such as Security Enhanced Linux, and also to implement file
* capabilities (see capabilities(7)). Read and write access
* permissions to security attributes depend on the policy implemented
* for each security attribute by the security module. When no security
* module is loaded, all processes have read access to extended security
* attributes, and write access is limited to processes that have the
* CAP_SYS_ADMIN capability." - xattr(7)
*/
static int
__zpl_xattr_security_list(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
return (1);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_security_list);
static int
__zpl_xattr_security_get(struct inode *ip, const char *name,
void *value, size_t size)
{
char *xattr_name;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name);
error = zpl_xattr_get(ip, xattr_name, value, size);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_security_get);
static int
__zpl_xattr_security_set(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
char *xattr_name;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name);
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_security_set);
static int
zpl_xattr_security_init_impl(struct inode *ip, const struct xattr *xattrs,
void *fs_info)
{
const struct xattr *xattr;
int error = 0;
for (xattr = xattrs; xattr->name != NULL; xattr++) {
error = __zpl_xattr_security_set(ip,
xattr->name, xattr->value, xattr->value_len, 0);
if (error < 0)
break;
}
return (error);
}
int
zpl_xattr_security_init(struct inode *ip, struct inode *dip,
const struct qstr *qstr)
{
return security_inode_init_security(ip, dip, qstr,
&zpl_xattr_security_init_impl, NULL);
}
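/*
 * Illustrative note (not part of the original source): at inode creation
 * the LSM (e.g. SELinux) hands zpl_xattr_security_init_impl() an array of
 * struct xattr entries with unprefixed names such as "selinux";
 * __zpl_xattr_security_set() then re-adds XATTR_SECURITY_PREFIX so the
 * label is stored as "security.selinux". The sketch below only shows the
 * assumed shape of that callback input; the label value is hypothetical.
 */
#if 0
	static const char label[] = "system_u:object_r:user_home_t:s0";
	const struct xattr xattrs[] = {
		/* Stored by the loop above as "security.selinux". */
		{ .name = "selinux", .value = (void *)label,
		    .value_len = sizeof (label) },
		{ .name = NULL }	/* NULL name terminates the loop */
	};
#endif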
/*
* Security xattr namespace handlers.
*/
static xattr_handler_t zpl_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = zpl_xattr_security_list,
.get = zpl_xattr_security_get,
.set = zpl_xattr_security_set,
};
/*
* Extended system attributes
*
* "Extended system attributes are used by the kernel to store system
* objects such as Access Control Lists. Read and write access permissions
* to system attributes depend on the policy implemented for each system
* attribute implemented by filesystems in the kernel." - xattr(7)
*/
#ifdef CONFIG_FS_POSIX_ACL
static int
zpl_set_acl_impl(struct inode *ip, struct posix_acl *acl, int type)
{
char *name, *value = NULL;
int error = 0;
size_t size = 0;
if (S_ISLNK(ip->i_mode))
return (-EOPNOTSUPP);
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
umode_t mode = ip->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error < 0) {
return (error);
} else {
/*
* The mode bits will have been set by
* ->zfs_setattr()->zfs_acl_chmod_setattr()
* using the ZFS ACL conversion. If they
* differ from the Posix ACL conversion dirty
* the inode to write the Posix mode bits.
*/
if (ip->i_mode != mode) {
ip->i_mode = ITOZ(ip)->z_mode = mode;
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
}
if (error == 0)
acl = NULL;
}
}
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
if (!S_ISDIR(ip->i_mode))
return (acl ? -EACCES : 0);
break;
default:
return (-EINVAL);
}
if (acl) {
size = posix_acl_xattr_size(acl->a_count);
value = kmem_alloc(size, KM_SLEEP);
error = zpl_acl_to_xattr(acl, value, size);
if (error < 0) {
kmem_free(value, size);
return (error);
}
}
error = zpl_xattr_set(ip, name, value, size, 0);
if (value)
kmem_free(value, size);
if (!error) {
if (acl)
zpl_set_cached_acl(ip, type, acl);
else
zpl_forget_cached_acl(ip, type);
}
return (error);
}
#ifdef HAVE_SET_ACL
int
#ifdef HAVE_SET_ACL_USERNS
zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type)
#else
zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type)
#endif /* HAVE_SET_ACL_USERNS */
{
return (zpl_set_acl_impl(ip, acl, type));
}
#endif /* HAVE_SET_ACL */
static struct posix_acl *
zpl_get_acl_impl(struct inode *ip, int type)
{
struct posix_acl *acl;
void *value = NULL;
char *name;
/*
* As of Linux 3.14, the kernel get_acl will check this for us.
* Also as of Linux 4.7, comparing against ACL_NOT_CACHED is wrong
* as the kernel get_acl will set it to temporary sentinel value.
*/
#ifndef HAVE_KERNEL_GET_ACL_HANDLE_CACHE
acl = get_cached_acl(ip, type);
if (acl != ACL_NOT_CACHED)
return (acl);
#endif
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
return (ERR_PTR(-EINVAL));
}
int size = zpl_xattr_get(ip, name, NULL, 0);
if (size > 0) {
value = kmem_alloc(size, KM_SLEEP);
size = zpl_xattr_get(ip, name, value, size);
}
if (size > 0) {
acl = zpl_acl_from_xattr(value, size);
} else if (size == -ENODATA || size == -ENOSYS) {
acl = NULL;
} else {
acl = ERR_PTR(-EIO);
}
if (size > 0)
kmem_free(value, size);
/* As of Linux 4.7, the kernel get_acl will set this for us */
#ifndef HAVE_KERNEL_GET_ACL_HANDLE_CACHE
if (!IS_ERR(acl))
zpl_set_cached_acl(ip, type, acl);
#endif
return (acl);
}
#if defined(HAVE_GET_ACL_RCU)
struct posix_acl *
zpl_get_acl(struct inode *ip, int type, bool rcu)
{
if (rcu)
return (ERR_PTR(-ECHILD));
return (zpl_get_acl_impl(ip, type));
}
#elif defined(HAVE_GET_ACL)
struct posix_acl *
zpl_get_acl(struct inode *ip, int type)
{
return (zpl_get_acl_impl(ip, type));
}
#else
#error "Unsupported iops->get_acl() implementation"
#endif /* HAVE_GET_ACL_RCU */
int
zpl_init_acl(struct inode *ip, struct inode *dir)
{
struct posix_acl *acl = NULL;
int error = 0;
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (!S_ISLNK(ip->i_mode)) {
acl = zpl_get_acl_impl(dir, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return (PTR_ERR(acl));
if (!acl) {
ITOZ(ip)->z_mode = (ip->i_mode &= ~current_umask());
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
return (0);
}
}
if (acl) {
umode_t mode;
if (S_ISDIR(ip->i_mode)) {
error = zpl_set_acl_impl(ip, acl, ACL_TYPE_DEFAULT);
if (error)
goto out;
}
mode = ip->i_mode;
error = __posix_acl_create(&acl, GFP_KERNEL, &mode);
if (error >= 0) {
ip->i_mode = ITOZ(ip)->z_mode = mode;
zfs_mark_inode_dirty(ip);
if (error > 0) {
error = zpl_set_acl_impl(ip, acl,
ACL_TYPE_ACCESS);
}
}
}
out:
zpl_posix_acl_release(acl);
return (error);
}
int
zpl_chmod_acl(struct inode *ip)
{
struct posix_acl *acl;
int error;
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (S_ISLNK(ip->i_mode))
return (-EOPNOTSUPP);
acl = zpl_get_acl_impl(ip, ACL_TYPE_ACCESS);
if (IS_ERR(acl) || !acl)
return (PTR_ERR(acl));
error = __posix_acl_chmod(&acl, GFP_KERNEL, ip->i_mode);
if (!error)
error = zpl_set_acl_impl(ip, acl, ACL_TYPE_ACCESS);
zpl_posix_acl_release(acl);
return (error);
}
static int
__zpl_xattr_acl_list_access(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
char *xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
size_t xattr_size = sizeof (XATTR_NAME_POSIX_ACL_ACCESS);
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (list && xattr_size <= list_size)
memcpy(list, xattr_name, xattr_size);
return (xattr_size);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_acl_list_access);
static int
__zpl_xattr_acl_list_default(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
char *xattr_name = XATTR_NAME_POSIX_ACL_DEFAULT;
size_t xattr_size = sizeof (XATTR_NAME_POSIX_ACL_DEFAULT);
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (list && xattr_size <= list_size)
memcpy(list, xattr_name, xattr_size);
return (xattr_size);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_acl_list_default);
static int
__zpl_xattr_acl_get_access(struct inode *ip, const char *name,
void *buffer, size_t size)
{
struct posix_acl *acl;
int type = ACL_TYPE_ACCESS;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
acl = zpl_get_acl_impl(ip, type);
if (IS_ERR(acl))
return (PTR_ERR(acl));
if (acl == NULL)
return (-ENODATA);
error = zpl_acl_to_xattr(acl, buffer, size);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_acl_get_access);
static int
__zpl_xattr_acl_get_default(struct inode *ip, const char *name,
void *buffer, size_t size)
{
struct posix_acl *acl;
int type = ACL_TYPE_DEFAULT;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
acl = zpl_get_acl_impl(ip, type);
if (IS_ERR(acl))
return (PTR_ERR(acl));
if (acl == NULL)
return (-ENODATA);
error = zpl_acl_to_xattr(acl, buffer, size);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_acl_get_default);
static int
__zpl_xattr_acl_set_access(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
struct posix_acl *acl;
int type = ACL_TYPE_ACCESS;
int error = 0;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EPERM);
if (value) {
acl = zpl_acl_from_xattr(value, size);
if (IS_ERR(acl))
return (PTR_ERR(acl));
else if (acl) {
error = zpl_posix_acl_valid(ip, acl);
if (error) {
zpl_posix_acl_release(acl);
return (error);
}
}
} else {
acl = NULL;
}
error = zpl_set_acl_impl(ip, acl, type);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_access);
static int
__zpl_xattr_acl_set_default(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
struct posix_acl *acl;
int type = ACL_TYPE_DEFAULT;
int error = 0;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EPERM);
if (value) {
acl = zpl_acl_from_xattr(value, size);
if (IS_ERR(acl))
return (PTR_ERR(acl));
else if (acl) {
error = zpl_posix_acl_valid(ip, acl);
if (error) {
zpl_posix_acl_release(acl);
return (error);
}
}
} else {
acl = NULL;
}
error = zpl_set_acl_impl(ip, acl, type);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_default);
/*
* ACL access xattr namespace handlers.
*
* Use .name instead of .prefix when available. xattr_resolve_name will match
* whole name and reject anything that has .name only as prefix.
*/
static xattr_handler_t zpl_xattr_acl_access_handler = {
#ifdef HAVE_XATTR_HANDLER_NAME
.name = XATTR_NAME_POSIX_ACL_ACCESS,
#else
.prefix = XATTR_NAME_POSIX_ACL_ACCESS,
#endif
.list = zpl_xattr_acl_list_access,
.get = zpl_xattr_acl_get_access,
.set = zpl_xattr_acl_set_access,
#if defined(HAVE_XATTR_LIST_SIMPLE) || \
defined(HAVE_XATTR_LIST_DENTRY) || \
defined(HAVE_XATTR_LIST_HANDLER)
.flags = ACL_TYPE_ACCESS,
#endif
};
/*
* ACL default xattr namespace handlers.
*
* Use .name instead of .prefix when available. xattr_resolve_name will match
* whole name and reject anything that has .name only as prefix.
*/
static xattr_handler_t zpl_xattr_acl_default_handler = {
#ifdef HAVE_XATTR_HANDLER_NAME
.name = XATTR_NAME_POSIX_ACL_DEFAULT,
#else
.prefix = XATTR_NAME_POSIX_ACL_DEFAULT,
#endif
.list = zpl_xattr_acl_list_default,
.get = zpl_xattr_acl_get_default,
.set = zpl_xattr_acl_set_default,
#if defined(HAVE_XATTR_LIST_SIMPLE) || \
defined(HAVE_XATTR_LIST_DENTRY) || \
defined(HAVE_XATTR_LIST_HANDLER)
.flags = ACL_TYPE_DEFAULT,
#endif
};
#endif /* CONFIG_FS_POSIX_ACL */
xattr_handler_t *zpl_xattr_handlers[] = {
&zpl_xattr_security_handler,
&zpl_xattr_trusted_handler,
&zpl_xattr_user_handler,
#ifdef CONFIG_FS_POSIX_ACL
&zpl_xattr_acl_access_handler,
&zpl_xattr_acl_default_handler,
#endif /* CONFIG_FS_POSIX_ACL */
NULL
};
static const struct xattr_handler *
zpl_xattr_handler(const char *name)
{
if (strncmp(name, XATTR_USER_PREFIX,
XATTR_USER_PREFIX_LEN) == 0)
return (&zpl_xattr_user_handler);
if (strncmp(name, XATTR_TRUSTED_PREFIX,
XATTR_TRUSTED_PREFIX_LEN) == 0)
return (&zpl_xattr_trusted_handler);
if (strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN) == 0)
return (&zpl_xattr_security_handler);
#ifdef CONFIG_FS_POSIX_ACL
if (strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
sizeof (XATTR_NAME_POSIX_ACL_ACCESS)) == 0)
return (&zpl_xattr_acl_access_handler);
if (strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
sizeof (XATTR_NAME_POSIX_ACL_DEFAULT)) == 0)
return (&zpl_xattr_acl_default_handler);
#endif /* CONFIG_FS_POSIX_ACL */
return (NULL);
}
static enum xattr_permission
zpl_xattr_permission(xattr_filldir_t *xf, const char *name, int name_len)
{
const struct xattr_handler *handler;
struct dentry *d __maybe_unused = xf->dentry;
enum xattr_permission perm = XAPERM_ALLOW;
handler = zpl_xattr_handler(name);
if (handler == NULL) {
/* Do not expose FreeBSD system namespace xattrs. */
if (ZFS_XA_NS_PREFIX_MATCH(FREEBSD, name))
return (XAPERM_DENY);
/*
* Anything that doesn't match a known namespace gets put in the
* user namespace for compatibility with other platforms.
*/
perm = XAPERM_COMPAT;
handler = &zpl_xattr_user_handler;
}
if (handler->list) {
#if defined(HAVE_XATTR_LIST_SIMPLE)
if (!handler->list(d))
return (XAPERM_DENY);
#elif defined(HAVE_XATTR_LIST_DENTRY)
if (!handler->list(d, NULL, 0, name, name_len, 0))
return (XAPERM_DENY);
#elif defined(HAVE_XATTR_LIST_HANDLER)
if (!handler->list(handler, d, NULL, 0, name, name_len))
return (XAPERM_DENY);
#endif
}
return (perm);
}
#if defined(CONFIG_FS_POSIX_ACL) && \
(!defined(HAVE_POSIX_ACL_RELEASE) || \
defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY))
struct acl_rel_struct {
struct acl_rel_struct *next;
struct posix_acl *acl;
clock_t time;
};
#define ACL_REL_GRACE (60*HZ)
#define ACL_REL_WINDOW (1*HZ)
#define ACL_REL_SCHED (ACL_REL_GRACE+ACL_REL_WINDOW)
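/*
 * Worked example (illustrative, not part of the original source): an ACL
 * released at lbolt T is queued and, if the list was empty, a free task is
 * scheduled for T + ACL_REL_SCHED = T + 61*HZ. When the task runs it frees
 * every queued ACL older than ACL_REL_GRACE (60 seconds); an ACL still in
 * its grace period causes the task to re-arm for that ACL's time + 61*HZ,
 * so releases are batched into roughly one-second (ACL_REL_WINDOW) groups.
 */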
/*
* Lockless multi-producer single-consumer fifo list.
* Nodes are added to tail and removed from head. Tail pointer is our
* synchronization point. It always points to the next pointer of the last
* node, or head if list is empty.
*/
static struct acl_rel_struct *acl_rel_head = NULL;
static struct acl_rel_struct **acl_rel_tail = &acl_rel_head;
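/*
 * Illustrative user-space sketch (not part of the original source) of the
 * lockless MPSC enqueue technique used below: a producer atomically swings
 * the tail pointer to its node's next field, then publishes the node by
 * storing into the previous tail slot. The names (node_t, head, tail,
 * enqueue) are hypothetical; the point is the invariant that the tail
 * always points at the next-pointer of the last node, or at the head when
 * the list is empty.
 */
#if 0
typedef struct node { struct node *next; } node_t;
static node_t *head;			/* read only by the single consumer */
static node_t **tail = &head;		/* always &last->next, or &head */

static void
enqueue(node_t *n)
{
	node_t **prev;

	n->next = NULL;
	/* Atomically claim the tail slot; prev is the old tail. */
	prev = __atomic_exchange_n(&tail, &n->next, __ATOMIC_ACQ_REL);
	/* Publish the node; until this store the consumer spins on NULL. */
	__atomic_store_n(prev, n, __ATOMIC_RELEASE);
}
#endif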
static void
zpl_posix_acl_free(void *arg)
{
struct acl_rel_struct *freelist = NULL;
struct acl_rel_struct *a;
clock_t new_time;
boolean_t refire = B_FALSE;
ASSERT3P(acl_rel_head, !=, NULL);
while (acl_rel_head) {
a = acl_rel_head;
if (ddi_get_lbolt() - a->time >= ACL_REL_GRACE) {
/*
* If a is the last node we need to reset tail, but we
* need to use cmpxchg to make sure it is still the
* last node.
*/
if (acl_rel_tail == &a->next) {
acl_rel_head = NULL;
if (cmpxchg(&acl_rel_tail, &a->next,
&acl_rel_head) == &a->next) {
ASSERT3P(a->next, ==, NULL);
a->next = freelist;
freelist = a;
break;
}
}
/*
* a is not last node, make sure next pointer is set
* by the adder and advance the head.
*/
while (READ_ONCE(a->next) == NULL)
cpu_relax();
acl_rel_head = a->next;
a->next = freelist;
freelist = a;
} else {
/*
* a is still in grace period. We are responsible to
* reschedule the free task, since adder will only do
* so if list is empty.
*/
new_time = a->time + ACL_REL_SCHED;
refire = B_TRUE;
break;
}
}
if (refire)
taskq_dispatch_delay(system_delay_taskq, zpl_posix_acl_free,
NULL, TQ_SLEEP, new_time);
while (freelist) {
a = freelist;
freelist = a->next;
kfree(a->acl);
kmem_free(a, sizeof (struct acl_rel_struct));
}
}
void
zpl_posix_acl_release_impl(struct posix_acl *acl)
{
struct acl_rel_struct *a, **prev;
a = kmem_alloc(sizeof (struct acl_rel_struct), KM_SLEEP);
a->next = NULL;
a->acl = acl;
a->time = ddi_get_lbolt();
/* atomically points tail to us and get the previous tail */
prev = xchg(&acl_rel_tail, &a->next);
ASSERT3P(*prev, ==, NULL);
*prev = a;
/* if it was empty before, schedule the free task */
if (prev == &acl_rel_head)
taskq_dispatch_delay(system_delay_taskq, zpl_posix_acl_free,
NULL, TQ_SLEEP, ddi_get_lbolt() + ACL_REL_SCHED);
}
#endif
ZFS_MODULE_PARAM(zfs, zfs_, xattr_compat, INT, ZMOD_RW,
"Use legacy ZFS xattr naming for writing new user namespace xattrs");
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 579e78befe18..b9969bff534e 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,11199 +1,11200 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use: mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* arc_meta_limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed and the arc_meta_limit honored. For example,
* when using the ZPL each dentry holds a reference on a znode. These
* dentries must be pruned before the arc buffer holding the znode can
* be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list, however a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will memcpy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static arc_buf_hdr_t **arc_state_evict_markers;
static int arc_state_evict_marker_count;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
static int zfs_arc_eviction_pct = 200;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
static int zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
int arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
static const int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
static int zfs_arc_overflow_shift = 8;
/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;
/* log2(fraction of arc to reclaim) */
int arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
int arc_no_grow_shift = 5;
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static int arc_min_prefetch_ms;
static int arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
static unsigned long zfs_arc_dnode_limit = 0;
static unsigned long zfs_arc_dnode_reduce_percent = 10;
static int zfs_arc_grow_retry = 0;
static int zfs_arc_shrink_shift = 0;
static int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle:
* * total dirty data limit
* * anon block dirty limit
* * each pool's anon allowance
*/
static const unsigned long zfs_arc_dirty_limit_percent = 50;
static const unsigned long zfs_arc_anon_limit_percent = 25;
static const unsigned long zfs_arc_pool_dirty_percent = 20;
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* ARC will evict meta buffers that exceed arc_meta_limit. This
* tunable makes arc_meta_limit adjustable for different workloads.
*/
static unsigned long zfs_arc_meta_limit_percent = 75;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
static unsigned long zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux-specific
*/
static unsigned long zfs_arc_sys_free = 0;
static int zfs_arc_min_prefetch_ms = 0;
static int zfs_arc_min_prescient_prefetch_ms = 0;
static int zfs_arc_p_dampener_disable = 1;
static int zfs_arc_meta_prune = 10000;
static int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
static int zfs_arc_meta_adjust_restarts = 4096;
static int zfs_arc_lotsfree_percent = 10;
/*
* Number of arc_prune threads
*/
static int zfs_arc_prune_task_threads = 1;
/* The 6 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "p", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
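/*
 * Illustrative expansion (not part of the original source): a hypothetical
 * call such as
 *
 *	ARCSTAT_CONDSTAT(is_demand, demand, prefetch,
 *	    is_metadata, metadata, data, hits);
 *
 * bumps exactly one of arcstat_demand_metadata_hits,
 * arcstat_demand_data_hits, arcstat_prefetch_metadata_hits or
 * arcstat_prefetch_data_hits, matching the kstat names listed above.
 */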
/*
* This macro allows us to use kstats as floating averages. Each time we
* update this kstat, we first factor it and the update value by
* ARCSTAT_F_AVG_FACTOR to shrink the new value's contribution to the overall
* average. This macro assumes that integer loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
} while (0)
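/*
 * Worked example (illustrative, not part of the original source): with
 * ARCSTAT_F_AVG_FACTOR == 3, updating a stat currently at 900 with a new
 * sample of 300 yields 900 - 900/3 + 300/3 = 900 - 300 + 100 = 700, i.e.
 * each update replaces one third of the running average with the new value.
 */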
static kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
/* max size for dnodes */
#define arc_dnode_size_limit ARCSTAT(arcstat_dnode_limit)
#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
* and each of these states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
static int l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_DO_ADAPT = 0x2,
ARC_HDR_USE_RESERVE = 0x4,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, const void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, const void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, const void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, const void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, const void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size,
const void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static void arc_buf_watch(arc_buf_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
* l2arc_exclude_special : A zfs module parameter that controls whether buffers
* present on special vdevs are eligible for caching in L2ARC. If
* set to 1, exclude dbufs on special vdevs from being cached to
* L2ARC.
*/
int l2arc_exclude_special = 0;
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
static int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
* will vary depending on how well the specific device handles
* these commands.
*/
static unsigned long l2arc_trim_ahead = 0;
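/*
 * Worked example (illustrative, not part of the original source, derived
 * from the comment above): with the default l2arc_write_max of 8MB and
 * l2arc_trim_ahead = 100, a full device would be trimmed 2 * 8MB = 16MB
 * ahead of the write hand, but the 64MB floor raises that to 64MB per pass.
 */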
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case do not write log blocks in L2ARC in order
* not to waste space.
*/
static int l2arc_rebuild_enabled = B_TRUE;
static unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static __attribute__((noreturn)) void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
uint64_t he = atomic_inc_64_nv(
&arc_stats.arcstat_hash_elements.value.ui64);
ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
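/*
* Unlink a hdr from its hash chain. The caller must hold the bucket's
* hash lock; the hdr's ARC_FLAG_IN_HASH_TABLE flag is cleared and the
* hash kstats are updated.
*/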
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
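/*
* Tear down the buf hash table, its per-bucket locks, and the header/buf
* kmem caches created by buf_init().
*/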
static void
buf_fini(void)
{
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (int i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
memset(&hdr->b_crypt_hdr, 0, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
}
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
memset(buf, 0, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
static void
hdr_full_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
(void) vbuf, (void) unused;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (((arc_buf_hdr_t *)NULL)->b_crypt_hdr),
ARC_SPACE_HDRS);
}
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
static void
buf_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_t *buf = vbuf;
mutex_destroy(&buf->b_evict_lock);
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
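/*
* Illustrative example (not derived from runtime values): with 64 GiB
* of memory and the default 8K average block size, the loop below grows
* hsize from 2^12 to 2^23 entries, so the table consumes
* 2^23 * sizeof (void *) = 64 MiB, matching the "1MB per GB" estimate
* above.
*/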
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
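/*
* Return the logical (uncompressed) size of the buf.
*/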
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
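/*
* Copy the salt, IV and MAC of a protected hdr out to the caller and
* report whether the data is stored in native byte order.
*/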
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
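/*
* Returns B_TRUE if the buf's b_data points directly into the hdr's
* linear b_pabd, i.e. the buf and the hdr share a single copy of the
* data.
*/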
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
IMPLY(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
(void) sig, (void) unused;
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#else
(void) buf;
#endif
}
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#else
(void) buf;
#endif
}
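/*
* Return the content type (data vs. metadata) recorded in the hdr's
* flags, verifying that it matches hdr->b_type.
*/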
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
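/*
* Thaw an anonymous buf so it may be modified: verify its freeze
* checksum (if any), then discard the checksum and the debug write
* protection. Compressed bufs do not carry a freeze checksum.
*/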
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in
* thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Holes and embedded blocks will always have a psize = 0, so we
* ignore the compression of the blkptr and mark them as
* uncompressed. The same is done when compressed ARC is disabled.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
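/*
* Verify the MAC of an authenticated (but not yet verified) hdr. If
* compressed ARC is disabled, the plaintext is recompressed into a
* temporary buffer first, since the MAC covers the on-disk bytes.
* On success ARC_FLAG_NOAUTH is cleared; a missing key (ENOENT) is not
* treated as an error.
*/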
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
tmpbuf = zio_buf_alloc(lsize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmpbuf, lsize, hdr->b_complevel);
ASSERT3U(csize, <=, psize);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
* abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
hdr->b_crypt_hdr.b_ebufcnt -= 1;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !ARC_BUF_SHARED(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (!arc_buf_is_shared(buf)) {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (arc_buf_is_shared(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, const void *tag)
{
arc_state_t *state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
state = hdr->b_l1hdr.b_state;
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
multilist_remove(&state->arcs_list[arc_buf_type(hdr)],
hdr);
arc_evictable_space_decrement(hdr, state);
}
/* remove the prefetch flag if we get a reference */
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, const void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
ASSERT(!GHOST_STATE(state));
/*
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
arc_evictable_space_increment(hdr, state);
}
return (cnt);
}
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
(void) state_index;
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = l1hdr->b_bufcnt;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
kmutex_t *hash_lock)
{
arc_state_t *old_state;
int64_t refcnt;
uint32_t bufcnt;
boolean_t update_old, update_new;
arc_buf_contents_t buftype = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
} else {
old_state = arc_l2c_only;
refcnt = 0;
bufcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT3P(new_state, !=, old_state);
ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
ASSERT(old_state != arc_anon || bufcnt <= 1);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_remove(&old_state->arcs_list[buftype], hdr);
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_old = B_TRUE;
}
arc_evictable_space_decrement(hdr, old_state);
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[buftype], hdr);
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_new = B_TRUE;
}
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have a
* bufcnt of zero, and no arc buffer to use for
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
(void) zfs_refcount_add_many(&new_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(&old_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_buf_size(buf),
buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_hdr_size(hdr),
hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, HDR_GET_PSIZE(hdr),
hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
}
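/*
* Account for space consumed by the ARC, broken down by type, in the
* corresponding arcstat counters and aggsums.
*/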
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABD's, not
* just those allocated by the ARC. But the vast majority of
* scatter ABD's come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
aggsum_add(&arc_sums.arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
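/*
* Return space previously accounted by arc_space_consume().
*/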
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE) {
ASSERT(aggsum_compare(&arc_sums.arcstat_meta_used,
space) >= 0);
ARCSTAT_MAX(arcstat_meta_max,
aggsum_upper_bound(&arc_sums.arcstat_meta_used));
aggsum_add(&arc_sums.arcstat_meta_used, -space);
}
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
const void *tag, boolean_t encrypted, boolean_t compressed,
boolean_t noauth, boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
hdr->b_l1hdr.b_bufcnt += 1;
if (encrypted)
hdr->b_crypt_hdr.b_ebufcnt += 1;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static const char *arc_onloan_tag = "onloan";
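/*
* Adjust the running count of loaned-out bytes; it must never go
* negative.
*/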
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
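/*
* Queue an ABD still referenced by an in-flight L2ARC write onto the
* free-on-write list; it is freed once the L2ARC write completes.
*/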
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
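/*
* Drop the state accounting for a hdr whose data must be freed while an
* L2ARC write is still using it, then defer the actual free via
* l2arc_free_abd_on_write().
*/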
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list then update the lastbuf and update
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (arc_buf_is_shared(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf)) {
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
* If we have no more encrypted buffers and we've
* already gotten a copy of the decrypted data we can
* free b_rabd to save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
!HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
}
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
VERIFY(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must setup a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
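/*
* Allocate the hdr's backing ABD: the raw (encrypted) b_rabd when
* ARC_HDR_ALLOC_RDATA is set in alloc_flags, otherwise the physical
* b_pabd, and update the compressed/uncompressed size statistics.
*/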
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
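/*
* Free the hdr's physical (b_pabd) or, when free_rdata is set, raw
* (b_rabd) ABD. If the hdr is currently being written to the L2ARC,
* the free is deferred to the l2arc_free_on_write list instead.
*/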
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
/*
* Allocate empty anonymous ARC header. The header will get its identity
* assigned and buffers attached later as part of read or write operations.
*
* In case of read, arc_read() assigns the header its identity (b_dva + b_birth),
* inserts it into ARC hash to become globally visible and allocates physical
* (b_pabd) or raw (b_rabd) ABD buffer to read into from disk. On disk read
* completion arc_read_done() allocates ARC buffer(s) as needed, potentially
* sharing one of them with the physical ABD buffer.
*
* In case of write, arc_alloc_buf() allocates an ARC buffer to be filled with
* data. Then after compression and/or encryption arc_write_ready() allocates
* and fills (or potentially shares) physical (b_pabd) or raw (b_rabd) ABD
* buffer. On disk write completion arc_write_done() assigns the header its
* new identity (b_dva + b_birth) and inserts it into the ARC hash.
*
* In case of partial overwrite the old data is read first as described. Then
* arc_release() either allocates new anonymous ARC header and moves the ARC
* buffer to it, or reuses the old ARC header by discarding its identity and
* removing it from ARC hash. After buffer modification normal write process
* follows as described.
*/
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
if (protected) {
hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
ASSERT(HDR_EMPTY(hdr));
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_buf = NULL;
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
/*
* If the caller wanted a new full header and the header is to be
* encrypted we will actually allocate the header from the full crypt
* cache instead. The same applies to freeing from the old cache.
*/
if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
new = hdr_full_crypt_cache;
if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
old = hdr_full_crypt_cache;
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
memcpy(nhdr, hdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might be accessed even though it has been removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function allows an L1 header to be reallocated as a crypt
* header and vice versa. If we are going to a crypt header, the
* new fields will be zeroed out.
*/
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
arc_buf_hdr_t *nhdr;
arc_buf_t *buf;
kmem_cache_t *ncache, *ocache;
/*
* This function requires that hdr is in the arc_anon state.
* Therefore it won't have any L2ARC data for us to worry
* about copying.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node));
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (need_crypt) {
ncache = hdr_full_crypt_cache;
ocache = hdr_full_cache;
} else {
ncache = hdr_full_cache;
ocache = hdr_full_crypt_cache;
}
nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
/*
* Copy all members that aren't locks or condvars to the new header.
* No lists are pointing to us (as we asserted above), so we don't
* need to worry about the list nodes.
*/
nhdr->b_dva = hdr->b_dva;
nhdr->b_birth = hdr->b_birth;
nhdr->b_type = hdr->b_type;
nhdr->b_flags = hdr->b_flags;
nhdr->b_psize = hdr->b_psize;
nhdr->b_lsize = hdr->b_lsize;
nhdr->b_spa = hdr->b_spa;
nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
/*
* This zfs_refcount_add() exists only to ensure that the individual
* arc buffers always point to a header that is referenced, avoiding
* a small race condition that could trigger ASSERTs.
*/
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
mutex_enter(&buf->b_evict_lock);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
}
zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
if (need_crypt) {
arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
} else {
arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
}
/* unset all members of the original hdr */
memset(&hdr->b_dva, 0, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = ARC_BUFC_INVALID;
hdr->b_flags = 0;
hdr->b_psize = 0;
hdr->b_lsize = 0;
hdr->b_spa = 0;
hdr->b_l1hdr.b_freeze_cksum = NULL;
hdr->b_l1hdr.b_buf = NULL;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_byteswap = 0;
hdr->b_l1hdr.b_state = NULL;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_acb = NULL;
hdr->b_l1hdr.b_pabd = NULL;
if (ocache == hdr_full_crypt_cache) {
ASSERT(!HDR_HAS_RABD(hdr));
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
memset(hdr->b_crypt_hdr.b_salt, 0, ZIO_DATA_SALT_LEN);
memset(hdr->b_crypt_hdr.b_iv, 0, ZIO_DATA_IV_LEN);
memset(hdr->b_crypt_hdr.b_mac, 0, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
kmem_cache_free(ocache, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
if (!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, const void *tag, arc_buf_contents_t type,
int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, const void *tag, uint64_t psize,
uint64_t lsize, enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to disk,
* it's easiest if we just set up sharing between the buf and the hdr.
*/
arc_share_buf(hdr, buf);
return (buf);
}
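/*
* Allocate an ARC buffer for a raw (encrypted or authenticated) write.
* The hdr is created protected and seeded with the crypt parameters
* (dsobj, object type, byteswap, salt, IV and MAC) so the payload can
* be written out as-is.
*/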
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, const void *tag, uint64_t dsobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
return (buf);
}
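/*
* Update the L2ARC arcstats for this hdr's logical, physical and
* allocated sizes, incrementing when incr is set and decrementing
* otherwise. With state_only set, only the MRU/MFU state counters
* are adjusted.
*/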
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
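/*
* Detach the hdr from its L2ARC device: remove it from the device's
* buflist, roll back the arcstats, vdev space and refcount accounting,
* and clear ARC_FLAG_HAS_L2HDR. The caller must hold l2ad_mtx.
*/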
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
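/*
* Destroy a hdr that is no longer referenced: tear down any L2ARC
* state, free any remaining bufs and ABDs, and return the hdr to the
* appropriate kmem cache.
*/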
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr)) {
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
arc_hdr_l2hdr_destroy(hdr);
}
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can not
* be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (!HDR_PROTECTED(hdr)) {
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_full_crypt_cache, hdr);
}
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
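/*
* Release the caller's reference on buf and destroy it. An anonymous
* hdr holding its only buffer is destroyed outright; otherwise the buf
* is removed from the hdr under the hash lock.
*/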
void
arc_buf_destroy(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, NULL, tag));
arc_hdr_destroy(hdr);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
(void) remove_reference(hdr, hash_lock, tag);
arc_buf_destroy_impl(buf);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
*
* Return total size of evicted data buffers for eviction progress tracking.
* When evicting from ghost states return logical buffer size to make eviction
* progress at the same (or at least comparable) rate as from non-ghost states.
*
* Return *real_evicted for actual ARC size reduction to wake up threads
* waiting for it. For non-ghost states it includes size of evicted data
* buffers (the headers are not freed there). For ghost states it includes
* only the evicted headers size.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr, hash_lock);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
hdr = arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu);
evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime))) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
while (hdr->b_l1hdr.b_buf) {
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
ARCSTAT_BUMP(arcstat_mutex_miss);
break;
}
if (buf->b_data != NULL) {
bytes_evicted += HDR_GET_LSIZE(hdr);
*real_evicted += HDR_GET_LSIZE(hdr);
}
mutex_exit(&buf->b_evict_lock);
arc_buf_destroy_impl(buf);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
if (hdr->b_l1hdr.b_bufcnt == 0) {
arc_cksum_free(hdr);
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed
* buffer then we discard it here before we change states.
* This ensures that the accounting is updated correctly
* in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr, hash_lock);
ASSERT(HDR_IN_HASH_TABLE(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
}
return (bytes_evicted);
}
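/*
* Recompute arc_need_free from the current free memory deficit and,
* when present, the last eviction waiter's outstanding count. Called
* with arc_evict_lock held.
*/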
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
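/*
* Evict headers from one sublist of the given multilist, resuming at
* the per-sublist marker, until 'bytes' have been evicted or the
* zfs_arc_evict_batch_limit is exhausted. Credits arc_evict_count and
* wakes any eviction waiters that have been satisfied.
*/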
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
int evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count <= 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero, is the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, hash_lock,
&revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
- cond_resched();
+ kpreempt(KPREEMPT_SYNC);
return (bytes_evicted);
}
/*
* Allocate an array of buffer headers used as placeholders during arc state
* eviction.
*/
static arc_buf_hdr_t **
arc_state_alloc_markers(int count)
{
arc_buf_hdr_t **markers;
markers = kmem_zalloc(sizeof (*markers) * count, KM_SLEEP);
for (int i = 0; i < count; i++) {
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_type() and
* arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
}
return (markers);
}
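/*
* Free an array of marker headers previously allocated by
* arc_state_alloc_markers().
*/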
static void
arc_state_free_markers(arc_buf_hdr_t **markers, int count)
{
for (int i = 0; i < count; i++)
kmem_cache_free(hdr_full_cache, markers[i]);
kmem_free(markers, sizeof (*markers) * count);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, uint64_t bytes,
arc_buf_contents_t type)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
if (zthr_iscurthread(arc_evict_zthr)) {
markers = arc_state_evict_markers;
ASSERT3S(num_sublists, <=, arc_state_evict_marker_count);
} else {
markers = arc_state_alloc_markers(num_sublists);
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Try to reduce pinned dnodes with a floor of arc_dnode_limit.
* Request that 10% of the LRUs be scanned by the superblock
* shrinker.
*/
if (type == ARC_BUFC_DATA && aggsum_compare(
&arc_sums.arcstat_dnode_size, arc_dnode_size_limit) > 0) {
arc_prune_async((aggsum_upper_bound(
&arc_sums.arcstat_dnode_size) -
arc_dnode_size_limit) / sizeof (dnode_t) /
zfs_arc_dnode_reduce_percent);
}
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
}
if (markers != arc_state_evict_markers)
arc_state_free_markers(markers, num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified,
* restricting eviction to the spa and type given. This function
* prevents us from trying to evict more from a state's list than
* is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
arc_buf_contents_t type)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, spa, delta, type));
}
return (0);
}
/*
* The goal of this function is to evict enough meta data buffers from the
* ARC in order to enforce the arc_meta_limit. Achieving this is slightly
* more complicated than it appears because it is common for data buffers
* to have holds on meta data buffers. In addition, dnode meta data buffers
* will be held by the dnodes in the block preventing them from being freed.
* This means we can't simply traverse the ARC and expect to always find
* enough unheld meta data buffer to release.
*
* Therefore, this function has been updated to make alternating passes
* over the ARC releasing data buffers and then newly unheld meta data
* buffers. This ensures forward progress is maintained and meta_used
* will decrease. Normally this is sufficient, but if required the ARC
* will call the registered prune callbacks causing dentry and inodes to
* be dropped from the VFS cache. This will make dnode meta data buffers
* available for reclaim.
*/
static uint64_t
arc_evict_meta_balanced(uint64_t meta_used)
{
int64_t delta, prune = 0, adjustmnt;
uint64_t total_evicted = 0;
arc_buf_contents_t type = ARC_BUFC_DATA;
int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
restart:
/*
* This slightly differs from the way we evict from the mru in
* arc_evict because we don't have a "target" value (i.e. no
* "meta" arc_p). As a result, I think we can completely
* cannibalize the metadata in the MRU before we evict the
* metadata from the MFU. I think we probably need to implement a
* "metadata arc_p" value to do this properly.
*/
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
}
/*
* We can't afford to recalculate adjustmnt here. If we do,
* new metadata buffers can sneak into the MRU or ANON lists,
* thus penalizing the MFU metadata. Although the fudge factor is
* small, it has been empirically shown to be significant for
* certain workloads (e.g. creating many empty directories). As
* such, we use the original calculation for adjustmnt, and
* simply decrement the amount of data evicted from the MRU.
*/
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mfu, 0, delta, type);
}
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mru_ghost, 0, delta, type);
adjustmnt -= delta;
}
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mfu_ghost, 0, delta, type);
}
/*
* If after attempting to make the requested adjustment to the ARC
* the meta limit is still being exceeded then request that the
* higher layers drop some cached objects which have holds on ARC
* meta buffers. Requests to the upper layers will be made with
* increasingly large scan sizes until the ARC is below the limit.
*/
if (meta_used > arc_meta_limit) {
if (type == ARC_BUFC_DATA) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
if (zfs_arc_meta_prune) {
prune += zfs_arc_meta_prune;
arc_prune_async(prune);
}
}
if (restarts > 0) {
restarts--;
goto restart;
}
}
return (total_evicted);
}
/*
* Evict metadata buffers from the cache, such that arcstat_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
arc_evict_meta_only(uint64_t meta_used)
{
uint64_t total_evicted = 0;
int64_t target;
/*
* If we're over the meta limit, we want to evict enough
* metadata to get back under the meta limit. We don't want to
* evict so much that we drop the MRU below arc_p, though. If
* we're over the meta limit more than we're over arc_p, we
* evict some from the MRU here, and some from the MFU below.
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
total_evicted += arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
/*
* Similar to the above, we want to evict enough bytes to get us
* below the meta limit, but not so much as to drop us below the
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
(arc_c - arc_p)));
total_evicted += arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
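/*
* Evict metadata according to the configured zfs_arc_meta_strategy,
* either metadata-only or balanced between data and metadata.
*/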
static uint64_t
arc_evict_meta(uint64_t meta_used)
{
if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
return (arc_evict_meta_only(meta_used));
else
return (arc_evict_meta_balanced(meta_used));
}
/*
* Return the type of the oldest buffer in the given arc state
*
* This function will select a random sublist of type ARC_BUFC_DATA and
* a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
* is compared, and the type which contains the "older" buffer will be
* returned.
*/
static arc_buf_contents_t
arc_evict_type(arc_state_t *state)
{
multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
int data_idx = multilist_get_random_index(data_ml);
int meta_idx = multilist_get_random_index(meta_ml);
multilist_sublist_t *data_mls;
multilist_sublist_t *meta_mls;
arc_buf_contents_t type;
arc_buf_hdr_t *data_hdr;
arc_buf_hdr_t *meta_hdr;
/*
* We keep the sublist lock until we're finished, to prevent
* the headers from being destroyed via arc_evict_state().
*/
data_mls = multilist_sublist_lock(data_ml, data_idx);
meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
/*
* These two loops are to ensure we skip any markers that
* might be at the tail of the lists due to arc_evict_state().
*/
for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
if (data_hdr->b_spa != 0)
break;
}
for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
if (meta_hdr->b_spa != 0)
break;
}
if (data_hdr == NULL && meta_hdr == NULL) {
type = ARC_BUFC_DATA;
} else if (data_hdr == NULL) {
ASSERT3P(meta_hdr, !=, NULL);
type = ARC_BUFC_METADATA;
} else if (meta_hdr == NULL) {
ASSERT3P(data_hdr, !=, NULL);
type = ARC_BUFC_DATA;
} else {
ASSERT3P(data_hdr, !=, NULL);
ASSERT3P(meta_hdr, !=, NULL);
/* The headers can't be on the sublist without an L1 header */
ASSERT(HDR_HAS_L1HDR(data_hdr));
ASSERT(HDR_HAS_L1HDR(meta_hdr));
if (data_hdr->b_l1hdr.b_arc_access <
meta_hdr->b_l1hdr.b_arc_access) {
type = ARC_BUFC_DATA;
} else {
type = ARC_BUFC_METADATA;
}
}
multilist_sublist_unlock(meta_mls);
multilist_sublist_unlock(data_mls);
return (type);
}
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t total_evicted = 0;
uint64_t bytes;
int64_t target;
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* If we're over arc_meta_limit, we want to correct that before
* potentially evicting data buffers below.
*/
total_evicted += arc_evict_meta(ameta);
/*
* Adjust MRU size
*
* If we're over the target cache size, we want to evict enough
* from the list to get back to our target size. We don't want
* to evict too much from the MRU, such that it drops below
* arc_p. So, if we're over our target cache size more than
* the MRU is over arc_p, we'll evict enough to get back to
* arc_p here, and then evict more from the MFU below.
*/
target = MIN((int64_t)(asize - arc_c),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
* Otherwise, try to satisfy the requested number of bytes to
* evict from the type which contains older buffers; in an
* effort to keep newer buffers in the cache regardless of their
* type. If we cannot satisfy the number of bytes from this
* type, spill over into the next type.
*/
if (arc_evict_type(arc_mru) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
}
/*
* Re-sum ARC stats after the first round of evictions.
*/
asize = aggsum_value(&arc_sums.arcstat_size);
ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* Adjust MFU size
*
* Now that we've tried to evict enough from the MRU to get its
* size back to arc_p, if we're still above the target cache
* size, we evict the rest from the MFU.
*/
target = asize - arc_c;
if (arc_evict_type(arc_mfu) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
}
/*
* Adjust ghost lists
*
* In addition to the above, the ARC also defines target values
* for the ghost lists. The sum of the mru list and mru ghost
* list should never exceed the target size of the cache, and
* the sum of the mru list, mfu list, mru ghost list, and mfu
* ghost list should never exceed twice the target size of the
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
target = zfs_refcount_count(&arc_mru->arcs_size) +
zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
/*
* We assume the sum of the mru list and mfu list is less than
* or equal to arc_c (we enforced this above), which means we
* can use the simpler of the two equations below:
*
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
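/*
* Evict all evictable buffers, optionally restricted to the given spa,
* from each of the MRU, MFU and ghost states. With retry set, keep
* going until the states are empty.
*/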
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == 0);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
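/*
* Lower the ARC target size (arc_c) by to_free bytes, clamped to
* arc_c_min, and wake the eviction thread if the cache now exceeds
* the new target.
*/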
void
arc_reduce_target_size(int64_t to_free)
{
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
uint64_t c = MIN(arc_c, asize);
if (c > to_free && c - to_free > arc_c_min) {
arc_c = c - to_free;
atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
if (arc_p > arc_c)
arc_p = (arc_c >> 1);
ASSERT(arc_c >= arc_c_min);
ASSERT((int64_t)arc_p >= 0);
} else {
arc_c = arc_c_min;
}
if (asize > arc_c) {
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
}
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
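/*
* Reap the kmem caches backing the ARC (zio buffers, headers, ABDs and
* the btree leaf cache), pruning metadata holds first if the meta
* limit is exceeded.
*/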
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
#ifdef _KERNEL
if ((aggsum_compare(&arc_sums.arcstat_meta_used,
arc_meta_limit) >= 0) && zfs_arc_meta_prune) {
/*
* We are exceeding our meta-data cache limit.
* Prune some entries to release holds on meta-data.
*/
arc_prune_async(zfs_arc_meta_prune);
}
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
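/*
* zthr predicate for the ARC eviction thread: refresh the kstat data
* under ZFS_DEBUG and report whether arc_evict_needed has been set
* (e.g. by arc_reduce_target_size()).
*/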
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently; but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
return (arc_evict_needed);
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Evict from cache */
evicted = arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
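/*
* zthr predicate for the reap thread: request a reap when free memory
* is negative and no kmem reap is already active, otherwise manage
* arc_no_grow. Every 60th invocation it also reaps the zstd context
* pool.
*/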
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called unconditionally every 60 seconds to reclaim unused
* zstd compression and decompression context. This is done
* here to avoid the need for an independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
- int64_t to_free =
- (arc_c >> arc_shrink_shift) - free_memory;
- if (to_free > 0) {
- arc_reduce_target_size(to_free);
+ int64_t can_free = arc_c - arc_c_min;
+ if (can_free > 0) {
+ int64_t to_free = (can_free >> arc_shrink_shift) - free_memory;
+ if (to_free > 0)
+ arc_reduce_target_size(to_free);
}
spl_fstrans_unmark(cookie);
}
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal to arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
#endif /* _KERNEL */
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(int bytes, arc_state_t *state)
{
int mult;
uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
ASSERT(bytes > 0);
/*
* Adapt the target size of the MRU list:
* - if we just hit in the MRU ghost list, then increase
* the target size of the MRU list.
* - if we just hit in the MFU ghost list, then increase
* the target size of the MFU list by decreasing the
* target size of the MRU list.
*/
if (state == arc_mru_ghost) {
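/*
 * The adjustment is scaled by how much larger the other ghost
 * list is, capped at 10 below unless the dampener is disabled.
 */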
mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
} else if (state == arc_mfu_ghost) {
uint64_t delta;
mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10);
delta = MIN(bytes * mult, arc_p);
arc_p = MAX(arc_p_min, arc_p - delta);
}
ASSERT((int64_t)arc_p >= 0);
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
if (aggsum_upper_bound(&arc_sums.arcstat_size) >=
arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
atomic_add_64(&arc_c, (int64_t)bytes);
if (arc_c > arc_c_max)
arc_c = arc_c_max;
else if (state == arc_anon)
atomic_add_64(&arc_p, (int64_t)bytes);
if (arc_p > arc_c)
arc_p = arc_c;
}
ASSERT((int64_t)arc_p >= 0);
}
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static arc_ovf_level_t
arc_is_overflowing(boolean_t use_reserve)
{
/* Always allow at least one block of overflow */
int64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) -
arc_c - overflow / 2;
if (!use_reserve)
overflow /= 2;
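/*
 * Classify the overflow: below arc_c plus half the margin is none,
 * within the (possibly halved) remaining margin is some, and beyond
 * that is severe.  Callers with use_reserve set are therefore given
 * extra headroom before the overflow is treated as severe.
 */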
return (over < 0 ? ARC_OVF_NONE :
over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
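/*
 * Allocate an ABD to back the given header, charging the space to the
 * ARC via arc_get_data_impl() first.
 */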
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, alloc_flags);
if (type == ARC_BUFC_METADATA) {
return (abd_alloc(size, B_TRUE));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (abd_alloc(size, B_FALSE));
}
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, ARC_HDR_DO_ADAPT);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system. Waiting for
* eviction ensures that the memory used by the ARC decreases. Waiting for
* free memory ensures that the system won't run out of free pages, regardless
* of ARC behavior and settings. See arc_lowmem_init().
*/
void
arc_wait_for_eviction(uint64_t amount, boolean_t use_reserve)
{
switch (arc_is_overflowing(use_reserve)) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
* This is a bit racy without taking arc_evict_lock, but the
* worst that can happen is that we either call zthr_wakeup()
* an extra time due to a race with another thread, or that the
* flag we just set gets cleared by arc_evict_cb().  The latter
* is unlikely given the large hysteresis, and also unimportant,
* since at this level of overflow the eviction is purely
* advisory.  At the same time, taking the global lock here on
* every call without waiting for the actual eviction would
* create significant lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
if (alloc_flags & ARC_HDR_DO_ADAPT)
arc_adapt(size, state);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, so
* that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
alloc_flags & ARC_HDR_USE_RESERVE);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size, size, tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
/*
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
if (aggsum_upper_bound(&arc_sums.arcstat_size) < arc_c &&
hdr->b_l1hdr.b_state == arc_anon &&
(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
arc_p = MIN(arc_c, arc_p + size);
}
}
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size,
const void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
* NOTE: the hash lock is dropped in this function.
*/
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
clock_t now;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
if (hdr->b_l1hdr.b_state == arc_anon) {
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr, hash_lock);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
now = ddi_get_lbolt();
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
* (any subsequent access will bump this into the MFU state).
* or
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* This buffer has been "accessed" only once so far,
* but it is still in the cache. Move it to the MFU
* state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
}
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been "accessed" recently, but
* was evicted from the cache. Move it to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
arc_change_state(new_state, hdr, hash_lock);
hdr->b_l1hdr.b_mru_ghost_hits++;
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
*
* NOTE: an add_reference() that occurred when we did
* the arc_read() will have kicked this off the list.
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
hdr->b_l1hdr.b_mfu_hits++;
ARCSTAT_BUMP(arcstat_mfu_hits);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
* This buffer has been accessed more than once but has
* been evicted from the cache. Move it back to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
new_state = arc_mru;
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(new_state, hdr, hash_lock);
hdr->b_l1hdr.b_mfu_ghost_hits++;
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC.
*/
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
mutex_enter(&buf->b_evict_lock);
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(&buf->b_evict_lock);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
mutex_exit(&buf->b_evict_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
mutex_exit(&buf->b_evict_lock);
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zio, (void) zb, (void) bp;
if (buf == NULL)
return;
memcpy(arg, buf->b_data, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zb, (void) bp;
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
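/*
 * Sanity check that the header's cached sizes, compression setting and
 * protection flag are consistent with the given block pointer.
 */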
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
/*
* The hdr was inserted into the hash table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if the buffer was freed during
* the read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (zio->io_error == 0) {
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp,
hdr->b_crypt_hdr.b_mac);
}
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
if (hash_lock && zio->io_error == 0 &&
hdr->b_l1hdr.b_state == arc_anon) {
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
* called arc_access (to prevent any simultaneous readers from
* getting confused).
*/
arc_access(hdr, hash_lock);
}
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
hdr->b_l1hdr.b_acb = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (callback_cnt == 0)
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
cv_broadcast(&hdr->b_l1hdr.b_cv);
if (hash_lock != NULL) {
mutex_exit(hash_lock);
} else {
/*
* This block was freed while we waited for the read to
* complete. It has been removed from the hash table and
* moved to the anonymous state (so that it won't show up
* in the cache).
*/
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_next;
kmem_free(acb, sizeof (arc_callback_t));
}
if (freeable)
arc_hdr_destroy(hdr);
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/cleaning the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
* on the hash_lock, always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
/*
* Verify the block pointer contents are reasonable. This should
* always be the case since the blkptr is protected by a checksum.
* However, if there is damage it's desirable to detect this early
* and treat it as a checksum error. This allows an alternate blkptr
* to be tried when one is available (e.g. ditto blocks).
*/
if (!zfs_blkptr_verify(spa, bp, zio_flags & ZIO_FLAG_CONFIG_WRITER,
BLK_VERIFY_LOG)) {
rc = SET_ERROR(ECKSUM);
goto out;
}
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read".
* Create an anonymous arc buf to back it.
*/
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
arc_buf_t *buf = NULL;
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto out;
}
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
/*
* If there are multiple threads reading the same block
* and that block is not yet in the ARC, then only one
* thread will do the physical I/O and all other
* threads will wait until that I/O completes.
* Synchronous reads use the b_cv whereas nowait reads
* register a callback. Both are signalled/called in
* arc_read_done.
*
* Errors of the physical I/O may need to be propagated
* to the pio. For synchronous reads, we simply restart
* this function and it will reassess. Nowait reads
* attach the acb_zio_dummy zio to pio and
* arc_read_done propagates the physical I/O's io_error
* to acb_zio_dummy, and thereby to pio.
*/
if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
if (done) {
arc_callback_t *acb = NULL;
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
ASSERT3P(acb->acb_done, !=, NULL);
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
if (done && !no_buf) {
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
/*
* This is a demand read which does not have to
* wait for i/o because we did a predictive
* prefetch i/o for it, which has completed.
*/
DTRACE_PROBE1(
arc__demand__hit__predictive__prefetch,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(
arcstat_demand_hit_predictive_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
ARCSTAT_BUMP(
arcstat_demand_hit_prescient_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PRESCIENT_PREFETCH);
}
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
(void) remove_reference(hdr, hash_lock,
private);
arc_buf_destroy_impl(buf);
buf = NULL;
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
data, metadata, hits);
if (done)
done(NULL, zb, bp, buf, private);
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
rc = SET_ERROR(ENOENT);
if (hash_lock != NULL)
mutex_exit(hash_lock);
goto out;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
alloc_flags |= ARC_HDR_DO_ADAPT;
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* This is a delicate dance that we play here.
* This hdr might be in the ghost list so we access
* it to move it out of the ghost list before we
* initiate the read. If it's a prefetch then
* it won't have a callback so we'll remove the
* reference that arc_buf_alloc_impl() created. We
* do this after we've called arc_access() to
* avoid hitting an assert in remove_reference().
*/
arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
arc_access(hdr, hash_lock);
}
arc_hdr_alloc_abd(hdr, alloc_flags);
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(size, 1);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
* 5. This isn't prefetch or l2arc_noprefetch is 0.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
!(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
hdr->b_l2hdr.b_hits++;
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
}
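/*
 * Register a prune callback.  The returned handle is later passed to
 * arc_remove_prune_callback() to unregister it.
 */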
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or has a reference (i.e. a dedup-ed,
* dmu_sync-ed block). If this block is being prefetched, then it
* would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
* until the I/O completes. A block may also have a reference if it is
* part of a dedup-ed, dmu_synced write. The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
mutex_enter(&buf->b_evict_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
mutex_exit(&buf->b_evict_lock);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held; we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_bufcnt > 1) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
(void) remove_reference(hdr, hash_lock, tag);
if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (arc_buf_is_shared(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
VERIFY(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!ARC_BUF_SHARED(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size,
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf))
hdr->b_crypt_hdr.b_ebufcnt -= 1;
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
/*
* Allocate a new hdr. The new hdr will contain a b_pabd
* buffer which will be freed in arc_write().
*/
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
if (ARC_BUF_ENCRYPTED(buf))
nhdr->b_crypt_hdr.b_ebufcnt = 1;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
(void) zfs_refcount_add_many(&arc_anon->arcs_size,
arc_buf_size(buf), buf);
} else {
mutex_exit(&buf->b_evict_lock);
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
arc_change_state(arc_anon, hdr, hash_lock);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
int released;
mutex_enter(&buf->b_evict_lock);
released = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
mutex_exit(&buf->b_evict_lock);
return (released);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
int referenced;
mutex_enter(&buf->b_evict_lock);
referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
mutex_exit(&buf->b_evict_lock);
return (referenced);
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
* If we're reexecuting this zio because the pool suspended, then
* cleanup any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr))
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(HDR_PROTECTED(hdr));
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (!abd_size_alloc_linear(arc_buf_size(buf)) ||
!arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, thus we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_ALLOC_RDATA | ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_USE_RESERVE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
arc_write_physdone(zio_t *zio)
{
arc_write_callback_t *cb = zio->io_private;
if (cb->awcb_physdone != NULL)
cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists, hash_lock);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
}
ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt,
ZIO_DATA_SALT_LEN);
memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv,
ZIO_DATA_IV_LEN);
memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_physdone = physdone;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale, free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_physdone, arc_write_done, callback,
priority, zio_flags, zb);
return (zio);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);
/*
* Writes will, almost always, require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
* data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
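/*
 * Purely illustrative worked example (not part of the algorithm, using
 * hypothetical tunable values): with rarc_c = 8 GiB,
 * zfs_arc_dirty_limit_percent = 50, zfs_arc_anon_limit_percent = 25 and
 * zfs_arc_pool_dirty_percent = 20, the throttle below fires only when
 * total_dirty exceeds 4 GiB, anon_size exceeds 2 GiB, and this pool's
 * own dirty data exceeds 20% of anon_size.
 */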
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
size->value.ui64 = zfs_refcount_count(&state->arcs_size);
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
aggsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
aggsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power-of-two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64-bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
static unsigned int
arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj)
{
panic("Header %p insert into arc_l2c_only %p", obj, ml);
}
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
(#tuning), (u_longlong_t)(value)); \
} \
} while (0)
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
unsigned long limit;
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
arc_p = (arc_c >> 1);
if (arc_meta_limit > arc_c_max)
arc_meta_limit = arc_c_max;
if (arc_dnode_size_limit > arc_meta_limit)
arc_dnode_size_limit = arc_meta_limit;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 16M - <arc_c_max> */
if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
(zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_meta_min <= arc_c_max)) {
arc_meta_min = zfs_arc_meta_min;
if (arc_meta_limit < arc_meta_min)
arc_meta_limit = arc_meta_min;
if (arc_dnode_size_limit < arc_meta_min)
arc_dnode_size_limit = arc_meta_min;
}
WARN_IF_TUNING_IGNORED(zfs_arc_meta_min, arc_meta_min, verbose);
/* Valid range: <arc_meta_min> - <arc_c_max> */
limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
if ((limit != arc_meta_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_c_max))
arc_meta_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_meta_limit, arc_meta_limit, verbose);
/* Valid range: <arc_meta_min> - <arc_meta_limit> */
limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
if ((limit != arc_dnode_size_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_meta_limit))
arc_dnode_size_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_size_limit,
verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N */
if (zfs_arc_p_min_shift)
arc_p_min_shift = zfs_arc_p_min_shift;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if ((zfs_arc_lotsfree_percent >= 0) &&
(zfs_arc_lotsfree_percent <= 100))
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
static void
arc_state_multilist_init(multilist_t *ml,
multilist_sublist_index_func_t *index_func, int *maxcountp)
{
multilist_create(ml, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), index_func);
*maxcountp = MAX(*maxcountp, multilist_get_num_sublists(ml));
}
static void
arc_state_init(void)
{
int num_sublists = 0;
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated. Special index function asserts that.
*/
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
arc_state_l2c_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
arc_state_l2c_multilist_index_func, &num_sublists);
/*
* Keep track of the number of markers needed to reclaim buffers from
* any ARC state. The markers will be pre-allocated so as to minimize
* the number of memory allocations performed by the eviction thread.
*/
arc_state_evict_marker_count = num_sublists;
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size);
zfs_refcount_create(&arc_mru->arcs_size);
zfs_refcount_create(&arc_mru_ghost->arcs_size);
zfs_refcount_create(&arc_mfu->arcs_size);
zfs_refcount_create(&arc_mfu_ghost->arcs_size);
zfs_refcount_create(&arc_l2c_only->arcs_size);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
aggsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
aggsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
}
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size);
zfs_refcount_destroy(&arc_mru->arcs_size);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
zfs_refcount_destroy(&arc_mfu->arcs_size);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
zfs_refcount_destroy(&arc_l2c_only->arcs_size);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
aggsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
aggsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
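/*
 * Illustrative example only: with allmem = 16 GiB this yields an
 * arc_c_min of 512 MiB, since 16 GiB / 32 = 512 MiB exceeds the
 * 32 MiB floor (2ULL << SPA_MAXBLOCKSHIFT).
 */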
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifdef _KERNEL
/*
* If zfs_arc_max is non-zero at init, meaning it was set in the kernel
* environment before the module was loaded, don't block setting the
* maximum just because it is less than arc_c_min; instead, reset
* arc_c_min to a lower value.
* zfs_arc_min will be handled by arc_tuning_update().
*/
if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX &&
zfs_arc_max < allmem) {
arc_c_max = zfs_arc_max;
if (arc_c_min >= arc_c_max) {
arc_c_min = MAX(zfs_arc_max / 2,
2ULL << SPA_MAXBLOCKSHIFT);
}
}
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
arc_p = (arc_c >> 1);
/* Set arc_meta_min to 1/2 of the 32M arc_c_min floor (i.e. 16M). */
arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
/*
* Set arc_meta_limit to a percent of arc_c_max with a floor of
* arc_meta_min, and a ceiling of arc_c_max.
*/
percent = MIN(zfs_arc_meta_limit_percent, 100);
arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_size_limit = (percent * arc_meta_limit) / 100;
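/*
 * Worked example (illustrative values only): with arc_c_max = 8 GiB
 * and zfs_arc_meta_limit_percent = 75, arc_meta_limit becomes 6 GiB;
 * with zfs_arc_dnode_limit_percent = 10, arc_dnode_size_limit is then
 * roughly 614 MiB of that.
 */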
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", zfs_arc_prune_task_threads,
defclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_state_evict_markers =
arc_state_alloc_markers(arc_state_evict_marker_count);
arc_evict_zthr = zthr_create("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL, defclsyspri);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
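/*
 * Illustrative example: on a 64-bit machine with 32 GiB of memory and
 * zfs_dirty_data_max_percent = 10, zfs_dirty_data_max works out to
 * roughly 3.2 GiB, comfortably below the 4 GiB cap computed above.
 */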
if (zfs_wrlog_data_max == 0) {
/*
* dp_wrlog_total is reduced for each txg at the end of
* spa_sync(). However, dp_dirty_total is reduced every time
* a block is written out. Thus under normal operation,
* dp_wrlog_total could grow 2 times as big as
* zfs_dirty_data_max.
*/
zfs_wrlog_data_max = zfs_dirty_data_max * 2;
}
}
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
while ((p = list_head(&arc_prune_list)) != NULL) {
list_remove(&arc_prune_list, p);
zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
arc_state_free_markers(arc_state_evict_markers,
arc_state_evict_marker_count);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
* buf_fini() must precede arc_state_fini() because buf_fini() may
* trigger the release of kmem magazines, which can call back to
* arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially faster read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write a "l2arc log block",
* which is an additional piece of metadata which describes what's been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are 2 linked-lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved, but that is an optimization
* rather than a requirement for correctness. The log block also includes
* a pointer to the
* previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
* As can be seen in the diagram, rather than using a simple linked list,
* we use a pair of linked lists with alternating elements. This is a
* performance enhancement: with a single list we would only learn the
* address of the next log block once the current block had been
* completely read in, which would keep the device's I/O queue only one
* operation deep and incur a large amount of I/O round-trip latency.
* Having two lists allows us to fetch two log blocks ahead of where we
* are currently rebuilding L2ARC buffers.
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and simply writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
* construction, we cannot update blocks which we've already written to amend
* them to remove buffers which were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA with the
* birth TXG uniquely identify a block in space and time - once created,
* a block is immutable on disk. The worst thing we have done is wasted
* some time and memory at l2arc rebuild to reconstruct outdated ARC
* entries that will get dropped from the l2arc as it is being updated
* with new blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
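/*
 * Conceptually (an illustrative sketch only; the real loop lives in
 * l2arc_feed_thread()), one feed cycle composes the three key functions
 * listed above roughly as follows:
 *
 *	size = l2arc_write_size(dev);
 *	wrote = l2arc_write_buffers(spa, dev, size);
 *	next = l2arc_write_interval(begin, size, wrote);
 *
 * with l2arc_write_eligible() deciding, per header, what may be copied.
 */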
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
uint64_t size, dev_size, tsize;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
"be greater than zero, resetting it to the default (%d)",
L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
/*
* Make sure the write size does not exceed the size of the cache
* device. This is important in l2arc_evict(), otherwise infinite
* iteration can occur.
*/
dev_size = dev->l2ad_end - dev->l2ad_start;
tsize = size + l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0)
tsize += MAX(64 * 1024 * 1024,
(tsize * l2arc_trim_ahead) / 100);
if (tsize >= dev_size) {
cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost "
"plus the overhead of log blocks (persistent L2ARC, "
"%llu bytes) exceeds the size of the cache device "
"(guid %llu), resetting them to the default (%d)",
(u_longlong_t)l2arc_log_blk_overhead(size, dev),
(u_longlong_t)dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE;
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
}
return (size);
}
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
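/*
 * For example (illustrative, assuming l2arc_feed_secs = 1 and
 * l2arc_feed_min_ms = 200): a cycle that wrote more than half of what
 * it wanted schedules the next feed roughly 200ms out instead of a
 * full second later.
 */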
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all);
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all)
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
list_t *buflist;
l2arc_data_free_t *df, *df_prev;
mutex_enter(&l2arc_free_on_write_mtx);
buflist = l2arc_free_on_write;
for (df = list_tail(buflist); df; df = df_prev) {
df_prev = list_prev(buflist, df);
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
list_remove(buflist, df);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* in which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed, reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
memset(l2dhdr, 0,
dev->l2ad_dev_hdr_asize);
} else {
memset(&l2dhdr->dh_start_lbps[i], 0,
sizeof (l2arc_log_blkptr_t));
}
break;
}
memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed but ARC compression
* is disabled, we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because zio is either re-used for
* an I/O of the block in the case of the error
* or the zio is passed to arc_read_done() and it
* needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked sublist of the
* selected list.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
*/
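/*
* Worked example (illustrative numbers, not defaults): with
* l2ad_log_entries = 1022 and write_sz = 8 MiB, log_entries =
* 8 MiB >> SPA_MINBLOCKSHIFT = 16384, so log_blocks =
* ceil(16384 / 1022) = 17 log blocks of metadata overhead are reserved.
*/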
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers or it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
buflist = &dev->l2ad_buflist;
/*
* We need to add in the worst case scenario of log block overhead.
*/
distance += l2arc_log_blk_overhead(distance, dev);
if (vd->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
* Trim ahead of the write size 64MB or (l2arc_trim_ahead/100)
* times the write size, whichever is greater.
*/
distance += MAX(64 * 1024 * 1024,
(distance * l2arc_trim_ahead) / 100);
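/*
* For illustration (assumed values, not defaults): with distance = 8 MiB
* and l2arc_trim_ahead = 200, 8 MiB * 200 / 100 = 16 MiB, which is below
* the 64 MiB floor, so 64 MiB is added to the eviction distance.
*/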
}
top:
rerun = B_FALSE;
if (dev->l2ad_hand >= (dev->l2ad_end - distance)) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_range() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_ranges() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (i.e.
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order in which
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* Unless we are evicting all buffers, check whether another
* pass is needed; otherwise we may iterate unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <, dev->l2ad_evict);
}
}
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function always returns, via abd_out, a new abd of
* asize bytes holding the data transformed as it is stored on disk.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it
* and copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
/*
* In some cases, we can wind up with size > asize, so
* we need to opt for the larger allocation option here.
*
* (We also need abd_return_buf_copy in all cases because
* modifying the buffer and then returning it with
* arc_return_buf() trips an ASSERT(), and the compressors
* write into the buffer before deciding to fail compression
* in nearly every case.)
*/
cabd = abd_alloc_for_io(size, ismd);
tmp = abd_borrow_buf(cabd, size);
psize = zio_compress_data(compress, to_write, tmp, size,
hdr->b_complevel);
if (psize >= asize) {
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, size);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
to_write = cabd;
abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto encrypt;
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
memset((char *)tmp + psize, 0, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, size);
to_write = cabd;
}
encrypt:
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The scan headroom is derived from the target write size and the
* l2arc_headroom / l2arc_headroom_boost tunables.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* If pass == 1 or 3, we cache MRU metadata and data
* respectively.
*/
if (l2arc_mfuonly) {
if (pass == 1 || pass == 3)
continue;
}
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
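/*
* Example with assumed tunable values: target_sz = 8 MiB and
* l2arc_headroom = 2 give a 16 MiB scan headroom; with compressed ARC
* enabled and l2arc_headroom_boost = 200 it doubles to 32 MiB.
*/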
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
if ((write_asize + asize) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
/*
* Create a list to save allocated abd buffers
* for l2arc_log_blk_commit().
*/
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_exit(hash_lock);
/*
* Append buf info to current log and commit if full.
* arcstat_l2_{size,asize} kstats are updated
* internally.
*/
if (l2arc_log_blk_insert(dev, hdr))
l2arc_log_blk_commit(dev, pio, cb);
zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
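/*
* Illustration (assumed sizes): with arc_meta_limit = 4 GiB the second
* term below trips once L2ARC header memory exceeds 3 GiB; the third
* term scales with arc_c (or arc_c_max while the ARC is still cold)
* and l2arc_meta_percent.
*/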
return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
static __attribute__((noreturn)) void
l2arc_feed_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
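/*
* E.g. with l2arc_feed_secs = 1 this backs the feed thread off to
* roughly a five second sleep instead of the usual one-second wakeup.
*/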
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
spa_t *spa = dev->l2ad_spa;
/*
* The L2ARC has to hold at least the payload of one log block for
* them to be restored (persistent L2ARC). The payload of a log block
* depends on the number of its log entries. We always write log blocks
* with 1022 entries. How many of them are committed or restored depends
* on the size of the L2ARC device. Thus the maximum payload of
* one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
* is less than that, we reduce the amount of committed and restored
* log entries per block so as to enable persistence.
*/
if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
dev->l2ad_log_entries = 0;
} else {
dev->l2ad_log_entries = MIN((dev->l2ad_end -
dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
L2ARC_LOG_BLK_MAX_ENTRIES);
}
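/*
* For example (illustrative size): a 4 GiB cache device yields
* (4 GiB >> SPA_MAXBLOCKSHIFT) = 256, so at most 256 log entries are
* committed or restored per log block instead of the full 1022.
*/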
/*
* Read the device header; if an error is returned, do not rebuild the L2ARC.
*/
if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
/*
* If we are onlining a cache device (vdev_reopen) that was
* still present (l2arc_vdev_present()) and rebuild is enabled,
* we should evict all ARC buffers and pointers to log blocks
* and reclaim their space before restoring its contents to
* L2ARC.
*/
if (reopen) {
if (!l2arc_rebuild_enabled) {
return;
} else {
l2arc_evict(dev, 0, B_TRUE);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
}
/*
* Just mark the device as pending for a rebuild. We won't
* be starting a rebuild in line here as it would block pool
* import. Instead spa_load_impl will hand that off to an
* async task which will call l2arc_spa_rebuild_start.
*/
dev->l2ad_rebuild = B_TRUE;
} else if (spa_writeable(spa)) {
/*
* In this case TRIM the whole device if l2arc_trim_ahead > 0,
* otherwise create a new header. We zero out the memory holding
* the header to reset dh_start_lbps. If we TRIM the whole
* device the new header will be written by
* vdev_trim_l2arc_thread() at the end of the TRIM to update the
* trim_state in the header too. When reading the header, if
* trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
* we opt to TRIM the whole device again.
*/
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
memset(l2dhdr, 0, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
}
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
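/*
* Illustration (assumed ashift): on a vdev with vdev_ashift = 12 the
* header area is MAX(sizeof (l2arc_dev_hdr_phys_t), 4096) bytes, so
* l2ad_start begins that far past the vdev labels and l2ad_end is the
* label offset plus the minimum allocatable size of the vdev.
*/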
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
/*
* Decide if dev is eligible for L2ARC rebuild or whole device
* trimming. This has to happen before the device is added to the
* cache device list and l2arc_dev_mtx is released. Otherwise
* l2arc_feed_thread() might already start writing on the
* device.
*/
l2arc_rebuild_dev(adddev, B_FALSE);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
/*
* Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
* in case of onlining a cache device.
*/
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
/*
* In contrast to l2arc_add_vdev() we do not have to worry about
* l2arc_feed_thread() invalidating previous content when onlining a
* cache device. The device parameters (l2ad*) are not cleared when
* offlining the device and writing new buffers will not invalidate
* all previous content. In the worst case only buffers that have not had
* their log block written to the device will be lost.
* When onlining the cache device (i.e. offline->online without exporting
* the pool in between) this happens:
* vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
* | |
* vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
* During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
* is set to B_TRUE we might write additional buffers to the device.
*/
l2arc_rebuild_dev(dev, reopen);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *remdev = NULL;
/*
* Find the device by vdev
*/
remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
if (remdev->l2ad_rebuild_began == B_TRUE) {
remdev->l2ad_rebuild_cancel = B_TRUE;
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
/*
* Remove device from global list
*/
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static __attribute__((noreturn)) void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(!dev->l2ad_rebuild_cancel);
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It
* starts reading the log block chain and restores each block's contents
* to memory (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that we know that the next_lb checks out alright, we
* can start reconstruction from this log block.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* Log block restored; include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
- cond_resched();
+ kpreempt(KPREEMPT_SYNC);
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* The L2ARC config lock is held by somebody as writer,
* possibly because they are trying to remove us. They
* will likely want us to shut down, so after a little
* delay we check l2ad_rebuild_cancel and retry
* the lock.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
/*
* Attempts to read the device header on the provided L2ARC device and
* copies it into dev->l2ad_dev_hdr. On success, this function returns 0,
* otherwise the appropriate error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* Attempt to rebuild a device containing no actual dev hdr
* or containing a header from some other pool or from another
* version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lbp and next_lbp point to the current and next log block
* address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC blk.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
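/*
* Call pattern sketch (as used by l2arc_rebuild() above): the first call
* passes this_io = NULL with *next_io initialized to NULL; each later
* iteration passes the *next_io returned by the previous call as this_io
* together with a fresh NULL-initialized *next_io.
*/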
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
abd_t *abd = NULL;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
if ((err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
if (abd != NULL)
abd_free(abd);
return (err);
}
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE, arc_l2c_only);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
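/*
* For instance (illustrative numbers): restoring 64 MiB of buffer asize
* from a single 128 KiB log block contributes a data-to-metadata ratio
* sample of 512 to the average above.
*/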
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize;
/*
* Do all the allocation before grabbing any locks, this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
* L2ARC metadata. If not, add them and update the flag.
* This is important in case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
* The returned zio will contain newly allocated memory buffers for the IO
* data which should then be freed by the caller once the zio is no longer
* needed (i.e. due to it having completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
static void
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
uint8_t *tmpbuf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
tmpbuf = zio_buf_alloc(sizeof (*lb));
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* try to compress the buffer */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, tmpbuf, sizeof (*lb), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
memset(tmpbuf + psize, 0, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
memcpy(tmpbuf, lb, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
fletcher_4_native(tmpbuf, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
memset(le, 0, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
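/*
* Illustrative caller pattern (a hypothetical simplification of the
* l2arc_write_buffers() logic): each buffer written to the cache device is
* appended to the open log block, and the block is committed once
* l2arc_log_blk_insert() reports that it is full:
*
* if (dev->l2ad_log_entries > 0 &&
* l2arc_log_blk_insert(dev, hdr))
* l2arc_log_blk_commit(dev, pio, cb);
*/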
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
* just do a range comparison; we need to handle the situation in which the
* range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine if it sits in
* between the top and bottom.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
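/*
* Worked example (illustrative): with bottom = 90 and top = 10 the range has
* wrapped around the end of the device, so the middle branch applies:
*
* l2arc_range_check_overlap(90, 10, 95) == B_TRUE (90 <= 95)
* l2arc_range_check_overlap(90, 10, 5) == B_TRUE (5 <= 10)
* l2arc_range_check_overlap(90, 10, 50) == B_FALSE (neither holds)
*/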
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
param_get_long, ZMOD_RW, "Minimum ARC size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
param_get_long, ZMOD_RW, "Maximum ARC size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Metadata limit for ARC size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of ARC size for ARC meta limit");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long,
param_get_long, ZMOD_RW, "Minimum ARC metadata size in bytes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW,
"Meta objects to scan for prune");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW,
"Limit number of restarts in arc_evict_meta");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW,
"Meta reclaim strategy");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_int, ZMOD_RW, "Seconds before growing ARC size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW,
"Disable arc_p adapt dampener");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "log2(fraction of ARC to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim ARC to");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed ARC buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_int, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_int, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, ULONG, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, ULONG, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, ULONG, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, ULONG, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, ULONG, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, ULONG, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, ULONG, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW,
"Exclude dbufs on special vdevs from being cached to L2ARC if set.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long,
param_get_long, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Minimum bytes of dnodes in ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
"Number of arc_prune threads");
diff --git a/sys/contrib/openzfs/module/zfs/bqueue.c b/sys/contrib/openzfs/module/zfs/bqueue.c
index 22539efc4e23..ec5ce4388ec8 100644
--- a/sys/contrib/openzfs/module/zfs/bqueue.c
+++ b/sys/contrib/openzfs/module/zfs/bqueue.c
@@ -1,155 +1,158 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2018 by Delphix. All rights reserved.
*/
#include <sys/bqueue.h>
#include <sys/zfs_context.h>
static inline bqueue_node_t *
obj2node(bqueue_t *q, void *data)
{
return ((bqueue_node_t *)((char *)data + q->bq_node_offset));
}
/*
* Initialize a blocking queue. The maximum capacity of the queue is set to
* size. Types that are stored in a bqueue must contain a bqueue_node_t,
* and node_offset must be its offset from the start of the struct.
* fill_fraction is a performance tuning value; when the queue is full, any
* threads attempting to enqueue records will block. They will block until
* they're signaled, which will occur when the queue is at least 1/fill_fraction
* empty. Similar behavior occurs on dequeue; if the queue is empty, threads
* block. They will be signaled when the queue is at least 1/fill_fraction full, or
* when bqueue_flush is called. As a result, you must call bqueue_flush when
* you enqueue your final record on a thread, in case the dequeueing threads are
* currently blocked and that enqueue does not cause them to be awoken.
* Alternatively, this behavior can be disabled (causing signaling to happen
* immediately) by setting fill_fraction to any value larger than size.
* Return 0 on success, or -1 on failure.
*/
int
-bqueue_init(bqueue_t *q, uint64_t fill_fraction, uint64_t size,
- size_t node_offset)
+bqueue_init(bqueue_t *q, uint_t fill_fraction, size_t size, size_t node_offset)
{
if (fill_fraction == 0) {
return (-1);
}
list_create(&q->bq_list, node_offset + sizeof (bqueue_node_t),
node_offset + offsetof(bqueue_node_t, bqn_node));
cv_init(&q->bq_add_cv, NULL, CV_DEFAULT, NULL);
cv_init(&q->bq_pop_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&q->bq_lock, NULL, MUTEX_DEFAULT, NULL);
q->bq_node_offset = node_offset;
q->bq_size = 0;
q->bq_maxsize = size;
q->bq_fill_fraction = fill_fraction;
return (0);
}
/*
* Destroy a blocking queue. This function asserts that there are no
* elements in the queue, and no one is blocked on the condition
* variables.
*/
void
bqueue_destroy(bqueue_t *q)
{
mutex_enter(&q->bq_lock);
ASSERT0(q->bq_size);
cv_destroy(&q->bq_add_cv);
cv_destroy(&q->bq_pop_cv);
list_destroy(&q->bq_list);
mutex_exit(&q->bq_lock);
mutex_destroy(&q->bq_lock);
}
static void
-bqueue_enqueue_impl(bqueue_t *q, void *data, uint64_t item_size,
- boolean_t flush)
+bqueue_enqueue_impl(bqueue_t *q, void *data, size_t item_size, boolean_t flush)
{
ASSERT3U(item_size, >, 0);
ASSERT3U(item_size, <=, q->bq_maxsize);
mutex_enter(&q->bq_lock);
obj2node(q, data)->bqn_size = item_size;
- while (q->bq_size + item_size > q->bq_maxsize) {
+ while (q->bq_size && q->bq_size + item_size > q->bq_maxsize) {
+ /*
+ * Wake up the bqueue_dequeue() thread if it is already sleeping, in
+ * order to prevent a deadlock.
+ */
+ cv_signal(&q->bq_pop_cv);
cv_wait_sig(&q->bq_add_cv, &q->bq_lock);
}
q->bq_size += item_size;
list_insert_tail(&q->bq_list, data);
- if (q->bq_size >= q->bq_maxsize / q->bq_fill_fraction)
- cv_signal(&q->bq_pop_cv);
if (flush)
cv_broadcast(&q->bq_pop_cv);
+ else if (q->bq_size >= q->bq_maxsize / q->bq_fill_fraction)
+ cv_signal(&q->bq_pop_cv);
mutex_exit(&q->bq_lock);
}
/*
* Add data to q, consuming size units of capacity. If there is insufficient
* capacity to consume size units, block until capacity exists. Asserts size is
* > 0.
*/
void
-bqueue_enqueue(bqueue_t *q, void *data, uint64_t item_size)
+bqueue_enqueue(bqueue_t *q, void *data, size_t item_size)
{
bqueue_enqueue_impl(q, data, item_size, B_FALSE);
}
/*
* Enqueue an entry, and then flush the queue. This forces the popping threads
* to wake up, even if we're below the fill fraction. We have this in a single
* function, rather than having a separate call, because it prevents race
* conditions between the enqueuing thread and the dequeueing thread, where the
* enqueueing thread will wake up the dequeueing thread, and that thread will
* then destroy the condvar before the enqueuing thread is done.
*/
void
-bqueue_enqueue_flush(bqueue_t *q, void *data, uint64_t item_size)
+bqueue_enqueue_flush(bqueue_t *q, void *data, size_t item_size)
{
bqueue_enqueue_impl(q, data, item_size, B_TRUE);
}
/*
* Take the first element off of q. If there are no elements on the queue, wait
* until one is put there. Return the removed element.
*/
void *
bqueue_dequeue(bqueue_t *q)
{
void *ret = NULL;
- uint64_t item_size;
+ size_t item_size;
mutex_enter(&q->bq_lock);
while (q->bq_size == 0) {
cv_wait_sig(&q->bq_pop_cv, &q->bq_lock);
}
ret = list_remove_head(&q->bq_list);
ASSERT3P(ret, !=, NULL);
item_size = obj2node(q, ret)->bqn_size;
q->bq_size -= item_size;
if (q->bq_size <= q->bq_maxsize - (q->bq_maxsize / q->bq_fill_fraction))
cv_signal(&q->bq_add_cv);
mutex_exit(&q->bq_lock);
return (ret);
}
/*
* Returns true if the space used is 0.
*/
boolean_t
bqueue_empty(bqueue_t *q)
{
return (q->bq_size == 0);
}
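/*
* Usage sketch (illustrative only, not part of this change): a minimal
* producer/consumer pair built on the API above. The record type "my_rec_t"
* and the end-of-stream convention are hypothetical; real callers embed a
* bqueue_node_t in their record type the same way and must terminate the
* stream with bqueue_enqueue_flush() so a sleeping consumer is woken for the
* final record.
*/
#if 0
typedef struct my_rec {
bqueue_node_t mr_node; /* embedded node required by bqueue */
boolean_t mr_eos; /* end-of-stream marker */
} my_rec_t;
static void
my_queue_init(bqueue_t *q)
{
/* 16 MB of capacity; wake consumers once the queue is 1/4 full. */
VERIFY0(bqueue_init(q, 4, 16 * 1024 * 1024,
offsetof(my_rec_t, mr_node)));
}
static void
my_producer(bqueue_t *q, my_rec_t *rec, my_rec_t *eos)
{
bqueue_enqueue(q, rec, sizeof (*rec));
/* The final record must be flushed so blocked consumers wake up. */
eos->mr_eos = B_TRUE;
bqueue_enqueue_flush(q, eos, sizeof (*eos));
}
static void
my_consumer(bqueue_t *q)
{
my_rec_t *rec;
while (!(rec = bqueue_dequeue(q))->mr_eos)
kmem_free(rec, sizeof (*rec));
kmem_free(rec, sizeof (*rec));
ASSERT(bqueue_empty(q));
bqueue_destroy(q);
}
#endif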
diff --git a/sys/contrib/openzfs/module/zfs/btree.c b/sys/contrib/openzfs/module/zfs/btree.c
index 14cab4054cbc..f0a9222a4308 100644
--- a/sys/contrib/openzfs/module/zfs/btree.c
+++ b/sys/contrib/openzfs/module/zfs/btree.c
@@ -1,2173 +1,2179 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
#include <sys/btree.h>
#include <sys/bitops.h>
#include <sys/zfs_context.h>
kmem_cache_t *zfs_btree_leaf_cache;
/*
* Control the extent of the verification that occurs when zfs_btree_verify is
* called. Primarily used for debugging when extending the btree logic and
* functionality. As the intensity is increased, new verification steps are
* added. These steps are cumulative; intensity = 3 includes the intensity = 1
* and intensity = 2 steps as well.
*
* Intensity 1: Verify that the tree's height is consistent throughout.
* Intensity 2: Verify that a core node's children's parent pointers point
* to the core node.
* Intensity 3: Verify that the total number of elements in the tree matches the
* sum of the number of elements in each node. Also verifies that each node's
* count obeys the invariants (less than or equal to maximum value, greater than
* or equal to half the maximum minus one).
* Intensity 4: Verify that each element compares less than the element
* immediately after it and greater than the one immediately before it using the
* comparator function. For core nodes, also checks that each element is greater
* than the last element in the first of the two nodes it separates, and less
* than the first element in the second of the two nodes.
* Intensity 5: Verifies, if ZFS_DEBUG is defined, that all unused memory inside
* of each node is poisoned appropriately. Note that poisoning always occurs if
* ZFS_DEBUG is set, so it is safe to set the intensity to 5 during normal
* operation.
*
* Intensity 4 and 5 are particularly expensive to perform; the previous levels
* are a few memory operations per node, while these levels require multiple
* operations per element. In addition, when creating large btrees, these
* operations are called at every step, resulting in extremely slow operation
* (while the asymptotic complexity of the other steps is the same, the
* importance of the constant factors cannot be denied).
*/
-int zfs_btree_verify_intensity = 0;
+uint_t zfs_btree_verify_intensity = 0;
/*
* Convenience functions to silence warnings from memcpy/memmove's
* return values and change argument order to src, dest.
*/
static void
bcpy(const void *src, void *dest, size_t size)
{
(void) memcpy(dest, src, size);
}
static void
bmov(const void *src, void *dest, size_t size)
{
(void) memmove(dest, src, size);
}
static boolean_t
zfs_btree_is_core(struct zfs_btree_hdr *hdr)
{
return (hdr->bth_first == -1);
}
#ifdef _ILP32
#define BTREE_POISON 0xabadb10c
#else
#define BTREE_POISON 0xabadb10cdeadbeef
#endif
static void
zfs_btree_poison_node(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
#ifdef ZFS_DEBUG
size_t size = tree->bt_elem_size;
if (zfs_btree_is_core(hdr)) {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = hdr->bth_count + 1; i <= BTREE_CORE_ELEMS;
i++) {
node->btc_children[i] =
(zfs_btree_hdr_t *)BTREE_POISON;
}
(void) memset(node->btc_elems + hdr->bth_count * size, 0x0f,
(BTREE_CORE_ELEMS - hdr->bth_count) * size);
} else {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
(void) memset(leaf->btl_elems, 0x0f, hdr->bth_first * size);
(void) memset(leaf->btl_elems +
(hdr->bth_first + hdr->bth_count) * size, 0x0f,
BTREE_LEAF_ESIZE -
(hdr->bth_first + hdr->bth_count) * size);
}
#endif
}
static inline void
zfs_btree_poison_node_at(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
uint32_t idx, uint32_t count)
{
#ifdef ZFS_DEBUG
size_t size = tree->bt_elem_size;
if (zfs_btree_is_core(hdr)) {
ASSERT3U(idx, >=, hdr->bth_count);
ASSERT3U(idx, <=, BTREE_CORE_ELEMS);
ASSERT3U(idx + count, <=, BTREE_CORE_ELEMS);
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = 1; i <= count; i++) {
node->btc_children[idx + i] =
(zfs_btree_hdr_t *)BTREE_POISON;
}
(void) memset(node->btc_elems + idx * size, 0x0f, count * size);
} else {
ASSERT3U(idx, <=, tree->bt_leaf_cap);
ASSERT3U(idx + count, <=, tree->bt_leaf_cap);
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
(void) memset(leaf->btl_elems +
(hdr->bth_first + idx) * size, 0x0f, count * size);
}
#endif
}
static inline void
zfs_btree_verify_poison_at(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
uint32_t idx)
{
#ifdef ZFS_DEBUG
size_t size = tree->bt_elem_size;
if (zfs_btree_is_core(hdr)) {
ASSERT3U(idx, <, BTREE_CORE_ELEMS);
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
zfs_btree_hdr_t *cval = (zfs_btree_hdr_t *)BTREE_POISON;
VERIFY3P(node->btc_children[idx + 1], ==, cval);
for (size_t i = 0; i < size; i++)
VERIFY3U(node->btc_elems[idx * size + i], ==, 0x0f);
} else {
ASSERT3U(idx, <, tree->bt_leaf_cap);
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
if (idx >= tree->bt_leaf_cap - hdr->bth_first)
return;
for (size_t i = 0; i < size; i++) {
VERIFY3U(leaf->btl_elems[(hdr->bth_first + idx)
* size + i], ==, 0x0f);
}
}
#endif
}
void
zfs_btree_init(void)
{
zfs_btree_leaf_cache = kmem_cache_create("zfs_btree_leaf_cache",
BTREE_LEAF_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
zfs_btree_fini(void)
{
kmem_cache_destroy(zfs_btree_leaf_cache);
}
void
zfs_btree_create(zfs_btree_t *tree, int (*compar) (const void *, const void *),
size_t size)
{
ASSERT3U(size, <=, BTREE_LEAF_ESIZE / 2);
memset(tree, 0, sizeof (*tree));
tree->bt_compar = compar;
tree->bt_elem_size = size;
tree->bt_leaf_cap = P2ALIGN(BTREE_LEAF_ESIZE / size, 2);
tree->bt_height = -1;
tree->bt_bulk = NULL;
}
/*
* Find value in the array of elements provided. Uses a simple binary search.
*/
static void *
zfs_btree_find_in_buf(zfs_btree_t *tree, uint8_t *buf, uint32_t nelems,
const void *value, zfs_btree_index_t *where)
{
uint32_t max = nelems;
uint32_t min = 0;
while (max > min) {
uint32_t idx = (min + max) / 2;
uint8_t *cur = buf + idx * tree->bt_elem_size;
int comp = tree->bt_compar(cur, value);
if (comp < 0) {
min = idx + 1;
} else if (comp > 0) {
max = idx;
} else {
where->bti_offset = idx;
where->bti_before = B_FALSE;
return (cur);
}
}
where->bti_offset = max;
where->bti_before = B_TRUE;
return (NULL);
}
/*
* Find the given value in the tree. where may be passed as null to use as a
* membership test or if the btree is being used as a map.
*/
void *
zfs_btree_find(zfs_btree_t *tree, const void *value, zfs_btree_index_t *where)
{
if (tree->bt_height == -1) {
if (where != NULL) {
where->bti_node = NULL;
where->bti_offset = 0;
}
ASSERT0(tree->bt_num_elems);
return (NULL);
}
/*
* If we're in bulk-insert mode, we check the last spot in the tree
* and the last leaf in the tree before doing the normal search,
* because for most workloads the vast majority of finds in
* bulk-insert mode are to insert new elements.
*/
zfs_btree_index_t idx;
size_t size = tree->bt_elem_size;
if (tree->bt_bulk != NULL) {
zfs_btree_leaf_t *last_leaf = tree->bt_bulk;
int comp = tree->bt_compar(last_leaf->btl_elems +
(last_leaf->btl_hdr.bth_first +
last_leaf->btl_hdr.bth_count - 1) * size, value);
if (comp < 0) {
/*
* If what they're looking for is after the last
* element, it's not in the tree.
*/
if (where != NULL) {
where->bti_node = (zfs_btree_hdr_t *)last_leaf;
where->bti_offset =
last_leaf->btl_hdr.bth_count;
where->bti_before = B_TRUE;
}
return (NULL);
} else if (comp == 0) {
if (where != NULL) {
where->bti_node = (zfs_btree_hdr_t *)last_leaf;
where->bti_offset =
last_leaf->btl_hdr.bth_count - 1;
where->bti_before = B_FALSE;
}
return (last_leaf->btl_elems +
(last_leaf->btl_hdr.bth_first +
last_leaf->btl_hdr.bth_count - 1) * size);
}
if (tree->bt_compar(last_leaf->btl_elems +
last_leaf->btl_hdr.bth_first * size, value) <= 0) {
/*
* If what they're looking for is after the first
* element in the last leaf, it's in the last leaf or
* it's not in the tree.
*/
void *d = zfs_btree_find_in_buf(tree,
last_leaf->btl_elems +
last_leaf->btl_hdr.bth_first * size,
last_leaf->btl_hdr.bth_count, value, &idx);
if (where != NULL) {
idx.bti_node = (zfs_btree_hdr_t *)last_leaf;
*where = idx;
}
return (d);
}
}
zfs_btree_core_t *node = NULL;
uint32_t child = 0;
uint64_t depth = 0;
/*
* Iterate down the tree, finding which child the value should be in
* by comparing with the separators.
*/
for (node = (zfs_btree_core_t *)tree->bt_root; depth < tree->bt_height;
node = (zfs_btree_core_t *)node->btc_children[child], depth++) {
ASSERT3P(node, !=, NULL);
void *d = zfs_btree_find_in_buf(tree, node->btc_elems,
node->btc_hdr.bth_count, value, &idx);
EQUIV(d != NULL, !idx.bti_before);
if (d != NULL) {
if (where != NULL) {
idx.bti_node = (zfs_btree_hdr_t *)node;
*where = idx;
}
return (d);
}
ASSERT(idx.bti_before);
child = idx.bti_offset;
}
/*
* The value is in this leaf, or it would be if it were in the
* tree. Find its proper location and return it.
*/
zfs_btree_leaf_t *leaf = (depth == 0 ?
(zfs_btree_leaf_t *)tree->bt_root : (zfs_btree_leaf_t *)node);
void *d = zfs_btree_find_in_buf(tree, leaf->btl_elems +
leaf->btl_hdr.bth_first * size,
leaf->btl_hdr.bth_count, value, &idx);
if (where != NULL) {
idx.bti_node = (zfs_btree_hdr_t *)leaf;
*where = idx;
}
return (d);
}
/*
* To explain the following functions, it is useful to understand the four
* kinds of shifts used in btree operation. First, a shift is a movement of
* elements within a node. It is used to create gaps for inserting new
* elements and children, or cover gaps created when things are removed. A
* shift has two fundamental properties, each of which can be one of two
* values, making four types of shifts. There is the direction of the shift
* (left or right) and the shape of the shift (parallelogram or isosceles
* trapezoid (shortened to trapezoid hereafter)). The shape distinction only
* applies to shifts of core nodes.
*
* The names derive from the following imagining of the layout of a node:
*
* Elements: * * * * * * * ... * * *
* Children: * * * * * * * * ... * * *
*
* This layout follows from the fact that the elements act as separators
* between pairs of children, and that children root subtrees "below" the
* current node. A left and right shift are fairly self-explanatory; a left
* shift moves things to the left, while a right shift moves things to the
* right. A parallelogram shift is a shift with the same number of elements
* and children being moved, while a trapezoid shift is a shift that moves one
* more child than elements. An example follows:
*
* A parallelogram shift could contain the following:
* _______________
* \* * * * \ * * * ... * * *
* * \ * * * *\ * * * ... * * *
* ---------------
* A trapezoid shift could contain the following:
* ___________
* * / * * * \ * * * ... * * *
* * / * * * *\ * * * ... * * *
* ---------------
*
* Note that a parallelogram shift is always shaped like a "left-leaning"
* parallelogram, where the starting index of the children being moved is
* always one higher than the starting index of the elements being moved. No
* "right-leaning" parallelogram shifts are needed (shifts where the starting
* element index and starting child index being moved are the same) to achieve
* any btree operations, so we ignore them.
*/
enum bt_shift_shape {
BSS_TRAPEZOID,
BSS_PARALLELOGRAM
};
enum bt_shift_direction {
BSD_LEFT,
BSD_RIGHT
};
/*
* Shift elements and children in the provided core node by off spots. The
* first element moved is idx, and count elements are moved. The shape of the
* shift is determined by shape. The direction is determined by dir.
*/
static inline void
bt_shift_core(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
uint32_t count, uint32_t off, enum bt_shift_shape shape,
enum bt_shift_direction dir)
{
size_t size = tree->bt_elem_size;
ASSERT(zfs_btree_is_core(&node->btc_hdr));
uint8_t *e_start = node->btc_elems + idx * size;
uint8_t *e_out = (dir == BSD_LEFT ? e_start - off * size :
e_start + off * size);
bmov(e_start, e_out, count * size);
zfs_btree_hdr_t **c_start = node->btc_children + idx +
(shape == BSS_TRAPEZOID ? 0 : 1);
zfs_btree_hdr_t **c_out = (dir == BSD_LEFT ? c_start - off :
c_start + off);
uint32_t c_count = count + (shape == BSS_TRAPEZOID ? 1 : 0);
bmov(c_start, c_out, c_count * sizeof (*c_start));
}
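/*
* Concrete example (illustrative): bt_shift_core(tree, node, 3, 4, 1,
* BSS_PARALLELOGRAM, BSD_RIGHT) moves elements 3..6 and children 4..7 one
* slot to the right, while the same call with BSS_TRAPEZOID moves elements
* 3..6 and children 3..7, i.e. one more child than elements.
*/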
/*
* Shift elements and children in the provided core node left by one spot.
* The first element moved is idx, and count elements are moved. The
* shape of the shift is determined by shape (trapezoid or parallelogram).
*/
static inline void
bt_shift_core_left(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
uint32_t count, enum bt_shift_shape shape)
{
bt_shift_core(tree, node, idx, count, 1, shape, BSD_LEFT);
}
/*
* Shift elements and children in the provided core node right by one spot.
* Starts with elements[idx] and children[idx] and one more child than elements.
*/
static inline void
bt_shift_core_right(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
uint32_t count, enum bt_shift_shape shape)
{
bt_shift_core(tree, node, idx, count, 1, shape, BSD_RIGHT);
}
/*
* Shift elements in the provided leaf node by off spots.
* The first element moved is idx, and count elements are moved. The direction
* is determined by dir.
*/
static inline void
bt_shift_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *node, uint32_t idx,
uint32_t count, uint32_t off, enum bt_shift_direction dir)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *hdr = &node->btl_hdr;
ASSERT(!zfs_btree_is_core(hdr));
if (count == 0)
return;
uint8_t *start = node->btl_elems + (hdr->bth_first + idx) * size;
uint8_t *out = (dir == BSD_LEFT ? start - off * size :
start + off * size);
bmov(start, out, count * size);
}
/*
* Grow leaf for n new elements before idx.
*/
static void
bt_grow_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf, uint32_t idx,
uint32_t n)
{
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
ASSERT(!zfs_btree_is_core(hdr));
ASSERT3U(idx, <=, hdr->bth_count);
uint32_t capacity = tree->bt_leaf_cap;
ASSERT3U(hdr->bth_count + n, <=, capacity);
boolean_t cl = (hdr->bth_first >= n);
boolean_t cr = (hdr->bth_first + hdr->bth_count + n <= capacity);
if (cl && (!cr || idx <= hdr->bth_count / 2)) {
/* Grow left. */
hdr->bth_first -= n;
bt_shift_leaf(tree, leaf, n, idx, n, BSD_LEFT);
} else if (cr) {
/* Grow right. */
bt_shift_leaf(tree, leaf, idx, hdr->bth_count - idx, n,
BSD_RIGHT);
} else {
/* Grow both ways. */
uint32_t fn = hdr->bth_first -
(capacity - (hdr->bth_count + n)) / 2;
hdr->bth_first -= fn;
bt_shift_leaf(tree, leaf, fn, idx, fn, BSD_LEFT);
bt_shift_leaf(tree, leaf, fn + idx, hdr->bth_count - idx,
n - fn, BSD_RIGHT);
}
hdr->bth_count += n;
}
/*
* Shrink leaf for n elements starting from idx.
*/
static void
bt_shrink_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf, uint32_t idx,
uint32_t n)
{
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
ASSERT(!zfs_btree_is_core(hdr));
ASSERT3U(idx, <=, hdr->bth_count);
ASSERT3U(idx + n, <=, hdr->bth_count);
if (idx <= (hdr->bth_count - n) / 2) {
bt_shift_leaf(tree, leaf, 0, idx, n, BSD_RIGHT);
zfs_btree_poison_node_at(tree, hdr, 0, n);
hdr->bth_first += n;
} else {
bt_shift_leaf(tree, leaf, idx + n, hdr->bth_count - idx - n, n,
BSD_LEFT);
zfs_btree_poison_node_at(tree, hdr, hdr->bth_count - n, n);
}
hdr->bth_count -= n;
}
/*
* Move children and elements from one core node to another. The shape
* parameter behaves the same as it does in the shift logic.
*/
static inline void
bt_transfer_core(zfs_btree_t *tree, zfs_btree_core_t *source, uint32_t sidx,
uint32_t count, zfs_btree_core_t *dest, uint32_t didx,
enum bt_shift_shape shape)
{
size_t size = tree->bt_elem_size;
ASSERT(zfs_btree_is_core(&source->btc_hdr));
ASSERT(zfs_btree_is_core(&dest->btc_hdr));
bcpy(source->btc_elems + sidx * size, dest->btc_elems + didx * size,
count * size);
uint32_t c_count = count + (shape == BSS_TRAPEZOID ? 1 : 0);
bcpy(source->btc_children + sidx + (shape == BSS_TRAPEZOID ? 0 : 1),
dest->btc_children + didx + (shape == BSS_TRAPEZOID ? 0 : 1),
c_count * sizeof (*source->btc_children));
}
static inline void
bt_transfer_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *source, uint32_t sidx,
uint32_t count, zfs_btree_leaf_t *dest, uint32_t didx)
{
size_t size = tree->bt_elem_size;
ASSERT(!zfs_btree_is_core(&source->btl_hdr));
ASSERT(!zfs_btree_is_core(&dest->btl_hdr));
bcpy(source->btl_elems + (source->btl_hdr.bth_first + sidx) * size,
dest->btl_elems + (dest->btl_hdr.bth_first + didx) * size,
count * size);
}
/*
* Find the first element in the subtree rooted at hdr, return its value and
* put its location in where if non-null.
*/
static void *
zfs_btree_first_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
zfs_btree_index_t *where)
{
zfs_btree_hdr_t *node;
for (node = hdr; zfs_btree_is_core(node);
node = ((zfs_btree_core_t *)node)->btc_children[0])
;
ASSERT(!zfs_btree_is_core(node));
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)node;
if (where != NULL) {
where->bti_node = node;
where->bti_offset = 0;
where->bti_before = B_FALSE;
}
return (&leaf->btl_elems[node->bth_first * tree->bt_elem_size]);
}
/* Insert an element and a child into a core node at the given offset. */
static void
zfs_btree_insert_core_impl(zfs_btree_t *tree, zfs_btree_core_t *parent,
uint32_t offset, zfs_btree_hdr_t *new_node, void *buf)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *par_hdr = &parent->btc_hdr;
ASSERT3P(par_hdr, ==, new_node->bth_parent);
ASSERT3U(par_hdr->bth_count, <, BTREE_CORE_ELEMS);
if (zfs_btree_verify_intensity >= 5) {
zfs_btree_verify_poison_at(tree, par_hdr,
par_hdr->bth_count);
}
/* Shift existing elements and children */
uint32_t count = par_hdr->bth_count - offset;
bt_shift_core_right(tree, parent, offset, count,
BSS_PARALLELOGRAM);
/* Insert new values */
parent->btc_children[offset + 1] = new_node;
bcpy(buf, parent->btc_elems + offset * size, size);
par_hdr->bth_count++;
}
/*
* Insert new_node into the parent of old_node directly after old_node, with
* buf as the dividing element between the two.
*/
static void
zfs_btree_insert_into_parent(zfs_btree_t *tree, zfs_btree_hdr_t *old_node,
zfs_btree_hdr_t *new_node, void *buf)
{
ASSERT3P(old_node->bth_parent, ==, new_node->bth_parent);
size_t size = tree->bt_elem_size;
zfs_btree_core_t *parent = old_node->bth_parent;
/*
* If this is the root node we were splitting, we create a new root
* and increase the height of the tree.
*/
if (parent == NULL) {
ASSERT3P(old_node, ==, tree->bt_root);
tree->bt_num_nodes++;
zfs_btree_core_t *new_root =
kmem_alloc(sizeof (zfs_btree_core_t) + BTREE_CORE_ELEMS *
size, KM_SLEEP);
zfs_btree_hdr_t *new_root_hdr = &new_root->btc_hdr;
new_root_hdr->bth_parent = NULL;
new_root_hdr->bth_first = -1;
new_root_hdr->bth_count = 1;
old_node->bth_parent = new_node->bth_parent = new_root;
new_root->btc_children[0] = old_node;
new_root->btc_children[1] = new_node;
bcpy(buf, new_root->btc_elems, size);
tree->bt_height++;
tree->bt_root = new_root_hdr;
zfs_btree_poison_node(tree, new_root_hdr);
return;
}
/*
* Since we have the new separator, binary search for where to put
* new_node.
*/
zfs_btree_hdr_t *par_hdr = &parent->btc_hdr;
zfs_btree_index_t idx;
ASSERT(zfs_btree_is_core(par_hdr));
VERIFY3P(zfs_btree_find_in_buf(tree, parent->btc_elems,
par_hdr->bth_count, buf, &idx), ==, NULL);
ASSERT(idx.bti_before);
uint32_t offset = idx.bti_offset;
ASSERT3U(offset, <=, par_hdr->bth_count);
ASSERT3P(parent->btc_children[offset], ==, old_node);
/*
* If the parent isn't full, shift things to accommodate our insertions
* and return.
*/
if (par_hdr->bth_count != BTREE_CORE_ELEMS) {
zfs_btree_insert_core_impl(tree, parent, offset, new_node, buf);
return;
}
/*
* We need to split this core node into two. Currently there are
* BTREE_CORE_ELEMS + 1 child nodes, and we are adding one for
* BTREE_CORE_ELEMS + 2. Some of the children will be part of the
* current node, and the others will be moved to the new core node.
* There are BTREE_CORE_ELEMS + 1 elements including the new one. One
* will be used as the new separator in our parent, and the others
* will be split among the two core nodes.
*
* Usually we will split the node in half evenly, with
* BTREE_CORE_ELEMS/2 elements in each node. If we're bulk loading, we
* instead move only about a quarter of the elements (and children) to
* the new node. Since the average state after a long time is a 3/4
* full node, shortcutting directly to that state improves efficiency.
*
* We do this in two stages: first we split into two nodes, and then we
* reuse our existing logic to insert the new element and child.
*/
uint32_t move_count = MAX((BTREE_CORE_ELEMS / (tree->bt_bulk == NULL ?
2 : 4)) - 1, 2);
uint32_t keep_count = BTREE_CORE_ELEMS - move_count - 1;
ASSERT3U(BTREE_CORE_ELEMS - move_count, >=, 2);
tree->bt_num_nodes++;
zfs_btree_core_t *new_parent = kmem_alloc(sizeof (zfs_btree_core_t) +
BTREE_CORE_ELEMS * size, KM_SLEEP);
zfs_btree_hdr_t *new_par_hdr = &new_parent->btc_hdr;
new_par_hdr->bth_parent = par_hdr->bth_parent;
new_par_hdr->bth_first = -1;
new_par_hdr->bth_count = move_count;
zfs_btree_poison_node(tree, new_par_hdr);
par_hdr->bth_count = keep_count;
bt_transfer_core(tree, parent, keep_count + 1, move_count, new_parent,
0, BSS_TRAPEZOID);
/* Store the new separator in a buffer. */
uint8_t *tmp_buf = kmem_alloc(size, KM_SLEEP);
bcpy(parent->btc_elems + keep_count * size, tmp_buf,
size);
zfs_btree_poison_node(tree, par_hdr);
if (offset < keep_count) {
/* Insert the new node into the left half */
zfs_btree_insert_core_impl(tree, parent, offset, new_node,
buf);
/*
* Move the new separator to the existing buffer.
*/
bcpy(tmp_buf, buf, size);
} else if (offset > keep_count) {
/* Insert the new node into the right half */
new_node->bth_parent = new_parent;
zfs_btree_insert_core_impl(tree, new_parent,
offset - keep_count - 1, new_node, buf);
/*
* Move the new separator to the existing buffer.
*/
bcpy(tmp_buf, buf, size);
} else {
/*
* Move the new separator into the right half, and replace it
* with buf. We also need to shift back the elements in the
* right half to accommodate new_node.
*/
bt_shift_core_right(tree, new_parent, 0, move_count,
BSS_TRAPEZOID);
new_parent->btc_children[0] = new_node;
bcpy(tmp_buf, new_parent->btc_elems, size);
new_par_hdr->bth_count++;
}
kmem_free(tmp_buf, size);
zfs_btree_poison_node(tree, par_hdr);
for (uint32_t i = 0; i <= new_parent->btc_hdr.bth_count; i++)
new_parent->btc_children[i]->bth_parent = new_parent;
for (uint32_t i = 0; i <= parent->btc_hdr.bth_count; i++)
ASSERT3P(parent->btc_children[i]->bth_parent, ==, parent);
/*
* Now that the node is split, we need to insert the new node into its
* parent. This may cause further splitting.
*/
zfs_btree_insert_into_parent(tree, &parent->btc_hdr,
&new_parent->btc_hdr, buf);
}
/* Insert an element into a leaf node at the given offset. */
static void
zfs_btree_insert_leaf_impl(zfs_btree_t *tree, zfs_btree_leaf_t *leaf,
uint32_t idx, const void *value)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
ASSERT3U(leaf->btl_hdr.bth_count, <, tree->bt_leaf_cap);
if (zfs_btree_verify_intensity >= 5) {
zfs_btree_verify_poison_at(tree, &leaf->btl_hdr,
leaf->btl_hdr.bth_count);
}
bt_grow_leaf(tree, leaf, idx, 1);
uint8_t *start = leaf->btl_elems + (hdr->bth_first + idx) * size;
bcpy(value, start, size);
}
static void
zfs_btree_verify_order_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr);
/* Helper function for inserting a new value into leaf at the given index. */
static void
zfs_btree_insert_into_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf,
const void *value, uint32_t idx)
{
size_t size = tree->bt_elem_size;
uint32_t capacity = tree->bt_leaf_cap;
/*
* If the leaf isn't full, shift the elements after idx and insert
* value.
*/
if (leaf->btl_hdr.bth_count != capacity) {
zfs_btree_insert_leaf_impl(tree, leaf, idx, value);
return;
}
/*
* Otherwise, we split the leaf node into two nodes. If we're not bulk
* inserting, each is of size (capacity / 2). If we are bulk
* inserting, we move a quarter of the elements to the new node so
* inserts into the old node don't cause immediate splitting but the
* tree stays relatively dense. Since the average state after a long
* time is a 3/4 full node, shortcutting directly to that state
* improves efficiency. At the end of the bulk insertion process
* we'll need to go through and fix up any nodes (the last leaf and
* its ancestors, potentially) that are below the minimum.
*
* In either case, we're left with one extra element. The leftover
* element will become the new dividing element between the two nodes.
*/
uint32_t move_count = MAX(capacity / (tree->bt_bulk ? 4 : 2), 1) - 1;
uint32_t keep_count = capacity - move_count - 1;
ASSERT3U(keep_count, >=, 1);
/* If we insert on left, move one more to keep leaves balanced. */
if (idx < keep_count) {
keep_count--;
move_count++;
}
tree->bt_num_nodes++;
zfs_btree_leaf_t *new_leaf = kmem_cache_alloc(zfs_btree_leaf_cache,
KM_SLEEP);
zfs_btree_hdr_t *new_hdr = &new_leaf->btl_hdr;
new_hdr->bth_parent = leaf->btl_hdr.bth_parent;
new_hdr->bth_first = (tree->bt_bulk ? 0 : capacity / 4) +
(idx >= keep_count && idx <= keep_count + move_count / 2);
new_hdr->bth_count = move_count;
zfs_btree_poison_node(tree, new_hdr);
if (tree->bt_bulk != NULL && leaf == tree->bt_bulk)
tree->bt_bulk = new_leaf;
/* Copy the back part to the new leaf. */
bt_transfer_leaf(tree, leaf, keep_count + 1, move_count, new_leaf, 0);
/* We store the new separator in a buffer we control for simplicity. */
uint8_t *buf = kmem_alloc(size, KM_SLEEP);
bcpy(leaf->btl_elems + (leaf->btl_hdr.bth_first + keep_count) * size,
buf, size);
bt_shrink_leaf(tree, leaf, keep_count, 1 + move_count);
if (idx < keep_count) {
/* Insert into the existing leaf. */
zfs_btree_insert_leaf_impl(tree, leaf, idx, value);
} else if (idx > keep_count) {
/* Insert into the new leaf. */
zfs_btree_insert_leaf_impl(tree, new_leaf, idx - keep_count -
1, value);
} else {
/*
* Insert planned separator into the new leaf, and use
* the new value as the new separator.
*/
zfs_btree_insert_leaf_impl(tree, new_leaf, 0, buf);
bcpy(value, buf, size);
}
/*
* Now that the node is split, we need to insert the new node into its
* parent. This may cause further splitting, but only of core nodes.
*/
zfs_btree_insert_into_parent(tree, &leaf->btl_hdr, &new_leaf->btl_hdr,
buf);
kmem_free(buf, size);
}
static uint32_t
zfs_btree_find_parent_idx(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
void *buf;
if (zfs_btree_is_core(hdr)) {
buf = ((zfs_btree_core_t *)hdr)->btc_elems;
} else {
buf = ((zfs_btree_leaf_t *)hdr)->btl_elems +
hdr->bth_first * tree->bt_elem_size;
}
zfs_btree_index_t idx;
zfs_btree_core_t *parent = hdr->bth_parent;
VERIFY3P(zfs_btree_find_in_buf(tree, parent->btc_elems,
parent->btc_hdr.bth_count, buf, &idx), ==, NULL);
ASSERT(idx.bti_before);
ASSERT3U(idx.bti_offset, <=, parent->btc_hdr.bth_count);
ASSERT3P(parent->btc_children[idx.bti_offset], ==, hdr);
return (idx.bti_offset);
}
/*
* Take the b-tree out of bulk insert mode. During bulk-insert mode, some
* nodes may violate the invariant that non-root nodes must be at least half
* full. All nodes violating this invariant should be the last node in their
* particular level. To correct the invariant, we take values from their left
* neighbor until they are half full. They must have a left neighbor at their
* level because the last node at a level is not the first node unless it's
* the root.
*/
static void
zfs_btree_bulk_finish(zfs_btree_t *tree)
{
ASSERT3P(tree->bt_bulk, !=, NULL);
ASSERT3P(tree->bt_root, !=, NULL);
zfs_btree_leaf_t *leaf = tree->bt_bulk;
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
zfs_btree_core_t *parent = hdr->bth_parent;
size_t size = tree->bt_elem_size;
uint32_t capacity = tree->bt_leaf_cap;
/*
* The invariant doesn't apply to the root node; if that's the only
* node in the tree, we're done.
*/
if (parent == NULL) {
tree->bt_bulk = NULL;
return;
}
/* First, take elements to rebalance the leaf node. */
if (hdr->bth_count < capacity / 2) {
/*
* First, find the left neighbor. The simplest way to do this
* is to call zfs_btree_prev twice; the first time finds some
* ancestor of this node, and the second time finds the left
* neighbor. The ancestor found is the lowest common ancestor
* of leaf and the neighbor.
*/
zfs_btree_index_t idx = {
.bti_node = hdr,
.bti_offset = 0
};
VERIFY3P(zfs_btree_prev(tree, &idx, &idx), !=, NULL);
ASSERT(zfs_btree_is_core(idx.bti_node));
zfs_btree_core_t *common = (zfs_btree_core_t *)idx.bti_node;
uint32_t common_idx = idx.bti_offset;
VERIFY3P(zfs_btree_prev(tree, &idx, &idx), !=, NULL);
ASSERT(!zfs_btree_is_core(idx.bti_node));
zfs_btree_leaf_t *l_neighbor = (zfs_btree_leaf_t *)idx.bti_node;
zfs_btree_hdr_t *l_hdr = idx.bti_node;
uint32_t move_count = (capacity / 2) - hdr->bth_count;
ASSERT3U(l_neighbor->btl_hdr.bth_count - move_count, >=,
capacity / 2);
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < move_count; i++) {
zfs_btree_verify_poison_at(tree, hdr,
leaf->btl_hdr.bth_count + i);
}
}
/* First, shift elements in leaf back. */
bt_grow_leaf(tree, leaf, 0, move_count);
/* Next, move the separator from the common ancestor to leaf. */
uint8_t *separator = common->btc_elems + common_idx * size;
uint8_t *out = leaf->btl_elems +
(hdr->bth_first + move_count - 1) * size;
bcpy(separator, out, size);
/*
* Now we move elements from the tail of the left neighbor to
* fill the remaining spots in leaf.
*/
bt_transfer_leaf(tree, l_neighbor, l_hdr->bth_count -
(move_count - 1), move_count - 1, leaf, 0);
/*
* Finally, move the new last element in the left neighbor to
* the separator.
*/
bcpy(l_neighbor->btl_elems + (l_hdr->bth_first +
l_hdr->bth_count - move_count) * size, separator, size);
/* Adjust the node's counts, and we're done. */
bt_shrink_leaf(tree, l_neighbor, l_hdr->bth_count - move_count,
move_count);
ASSERT3U(l_hdr->bth_count, >=, capacity / 2);
ASSERT3U(hdr->bth_count, >=, capacity / 2);
}
/*
* Now we have to rebalance any ancestors of leaf that may also
* violate the invariant.
*/
capacity = BTREE_CORE_ELEMS;
while (parent->btc_hdr.bth_parent != NULL) {
zfs_btree_core_t *cur = parent;
zfs_btree_hdr_t *hdr = &cur->btc_hdr;
parent = hdr->bth_parent;
/*
* If the invariant isn't violated, move on to the next
* ancestor.
*/
if (hdr->bth_count >= capacity / 2)
continue;
/*
* Because the smallest number of nodes we can move when
* splitting is 2, we never need to worry about not having a
* left sibling (a sibling is a neighbor with the same parent).
*/
uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
ASSERT3U(parent_idx, >, 0);
zfs_btree_core_t *l_neighbor =
(zfs_btree_core_t *)parent->btc_children[parent_idx - 1];
uint32_t move_count = (capacity / 2) - hdr->bth_count;
ASSERT3U(l_neighbor->btc_hdr.bth_count - move_count, >=,
capacity / 2);
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < move_count; i++) {
zfs_btree_verify_poison_at(tree, hdr,
hdr->bth_count + i);
}
}
/* First, shift things in the right node back. */
bt_shift_core(tree, cur, 0, hdr->bth_count, move_count,
BSS_TRAPEZOID, BSD_RIGHT);
/* Next, move the separator to the right node. */
uint8_t *separator = parent->btc_elems + ((parent_idx - 1) *
size);
uint8_t *e_out = cur->btc_elems + ((move_count - 1) * size);
bcpy(separator, e_out, size);
/*
* Now, move elements and children from the left node to the
* right. We move one more child than elements.
*/
move_count--;
uint32_t move_idx = l_neighbor->btc_hdr.bth_count - move_count;
bt_transfer_core(tree, l_neighbor, move_idx, move_count, cur, 0,
BSS_TRAPEZOID);
/*
* Finally, move the last element in the left node to the
* separator's position.
*/
move_idx--;
bcpy(l_neighbor->btc_elems + move_idx * size, separator, size);
l_neighbor->btc_hdr.bth_count -= move_count + 1;
hdr->bth_count += move_count + 1;
ASSERT3U(l_neighbor->btc_hdr.bth_count, >=, capacity / 2);
ASSERT3U(hdr->bth_count, >=, capacity / 2);
zfs_btree_poison_node(tree, &l_neighbor->btc_hdr);
for (uint32_t i = 0; i <= hdr->bth_count; i++)
cur->btc_children[i]->bth_parent = cur;
}
tree->bt_bulk = NULL;
zfs_btree_verify(tree);
}
/*
* Insert value into tree at the location specified by where.
*/
void
zfs_btree_add_idx(zfs_btree_t *tree, const void *value,
const zfs_btree_index_t *where)
{
zfs_btree_index_t idx = {0};
/* If we're not inserting in the last leaf, end bulk insert mode. */
if (tree->bt_bulk != NULL) {
if (where->bti_node != &tree->bt_bulk->btl_hdr) {
zfs_btree_bulk_finish(tree);
VERIFY3P(zfs_btree_find(tree, value, &idx), ==, NULL);
where = &idx;
}
}
tree->bt_num_elems++;
/*
* If this is the first element in the tree, create a leaf root node
* and add the value to it.
*/
if (where->bti_node == NULL) {
ASSERT3U(tree->bt_num_elems, ==, 1);
ASSERT3S(tree->bt_height, ==, -1);
ASSERT3P(tree->bt_root, ==, NULL);
ASSERT0(where->bti_offset);
tree->bt_num_nodes++;
zfs_btree_leaf_t *leaf = kmem_cache_alloc(zfs_btree_leaf_cache,
KM_SLEEP);
tree->bt_root = &leaf->btl_hdr;
tree->bt_height++;
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
hdr->bth_parent = NULL;
hdr->bth_first = 0;
hdr->bth_count = 0;
zfs_btree_poison_node(tree, hdr);
zfs_btree_insert_into_leaf(tree, leaf, value, 0);
tree->bt_bulk = leaf;
} else if (!zfs_btree_is_core(where->bti_node)) {
/*
* If we're inserting into a leaf, go directly to the helper
* function.
*/
zfs_btree_insert_into_leaf(tree,
(zfs_btree_leaf_t *)where->bti_node, value,
where->bti_offset);
} else {
/*
* If we're inserting into a core node, we can't just shift
* the existing element in that slot in the same node without
* breaking our ordering invariants. Instead we place the new
* value in the node at that spot and then insert the old
* separator into the first slot in the subtree to the right.
*/
zfs_btree_core_t *node = (zfs_btree_core_t *)where->bti_node;
/*
* We can ignore bti_before, because either way the value
* should end up in bti_offset.
*/
uint32_t off = where->bti_offset;
zfs_btree_hdr_t *subtree = node->btc_children[off + 1];
size_t size = tree->bt_elem_size;
uint8_t *buf = kmem_alloc(size, KM_SLEEP);
bcpy(node->btc_elems + off * size, buf, size);
bcpy(value, node->btc_elems + off * size, size);
/*
* Find the first slot in the subtree to the right, insert
* there.
*/
zfs_btree_index_t new_idx;
VERIFY3P(zfs_btree_first_helper(tree, subtree, &new_idx), !=,
NULL);
ASSERT0(new_idx.bti_offset);
ASSERT(!zfs_btree_is_core(new_idx.bti_node));
zfs_btree_insert_into_leaf(tree,
(zfs_btree_leaf_t *)new_idx.bti_node, buf, 0);
kmem_free(buf, size);
}
zfs_btree_verify(tree);
}
/*
* Return the first element in the tree, and put its location in where if
* non-null.
*/
void *
zfs_btree_first(zfs_btree_t *tree, zfs_btree_index_t *where)
{
if (tree->bt_height == -1) {
ASSERT0(tree->bt_num_elems);
return (NULL);
}
return (zfs_btree_first_helper(tree, tree->bt_root, where));
}
/*
* Find the last element in the subtree rooted at hdr, return its value and
* put its location in where if non-null.
*/
static void *
zfs_btree_last_helper(zfs_btree_t *btree, zfs_btree_hdr_t *hdr,
zfs_btree_index_t *where)
{
zfs_btree_hdr_t *node;
for (node = hdr; zfs_btree_is_core(node); node =
((zfs_btree_core_t *)node)->btc_children[node->bth_count])
;
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)node;
if (where != NULL) {
where->bti_node = node;
where->bti_offset = node->bth_count - 1;
where->bti_before = B_FALSE;
}
return (leaf->btl_elems + (node->bth_first + node->bth_count - 1) *
btree->bt_elem_size);
}
/*
* Return the last element in the tree, and put its location in where if
* non-null.
*/
void *
zfs_btree_last(zfs_btree_t *tree, zfs_btree_index_t *where)
{
if (tree->bt_height == -1) {
ASSERT0(tree->bt_num_elems);
return (NULL);
}
return (zfs_btree_last_helper(tree, tree->bt_root, where));
}
/*
* This function contains the logic to find the next node in the tree. A
* helper function is used because there are multiple internal consumemrs of
* this logic. The done_func is used by zfs_btree_destroy_nodes to clean up each
* node after we've finished with it.
*/
static void *
zfs_btree_next_helper(zfs_btree_t *tree, const zfs_btree_index_t *idx,
zfs_btree_index_t *out_idx,
void (*done_func)(zfs_btree_t *, zfs_btree_hdr_t *))
{
if (idx->bti_node == NULL) {
ASSERT3S(tree->bt_height, ==, -1);
return (NULL);
}
uint32_t offset = idx->bti_offset;
if (!zfs_btree_is_core(idx->bti_node)) {
/*
* When finding the next element of an element in a leaf,
* there are two cases. If the element isn't the last one in
* the leaf, we just return the next element in the leaf.
* Otherwise, we need to traverse up our parents
* until we find one where our ancestor isn't the last child
* of its parent. Once we do, the next element is the
* separator after our ancestor in its parent.
*/
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
uint32_t new_off = offset + (idx->bti_before ? 0 : 1);
if (leaf->btl_hdr.bth_count > new_off) {
out_idx->bti_node = &leaf->btl_hdr;
out_idx->bti_offset = new_off;
out_idx->bti_before = B_FALSE;
return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
new_off) * tree->bt_elem_size);
}
zfs_btree_hdr_t *prev = &leaf->btl_hdr;
for (zfs_btree_core_t *node = leaf->btl_hdr.bth_parent;
node != NULL; node = node->btc_hdr.bth_parent) {
zfs_btree_hdr_t *hdr = &node->btc_hdr;
ASSERT(zfs_btree_is_core(hdr));
uint32_t i = zfs_btree_find_parent_idx(tree, prev);
if (done_func != NULL)
done_func(tree, prev);
if (i == hdr->bth_count) {
prev = hdr;
continue;
}
out_idx->bti_node = hdr;
out_idx->bti_offset = i;
out_idx->bti_before = B_FALSE;
return (node->btc_elems + i * tree->bt_elem_size);
}
if (done_func != NULL)
done_func(tree, prev);
/*
* We've traversed all the way up and been at the end of the
* node every time, so this was the last element in the tree.
*/
return (NULL);
}
/* If we were before an element in a core node, return that element. */
ASSERT(zfs_btree_is_core(idx->bti_node));
zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
if (idx->bti_before) {
out_idx->bti_before = B_FALSE;
return (node->btc_elems + offset * tree->bt_elem_size);
}
/*
* The next element from one in a core node is the first element in
* the subtree just to the right of the separator.
*/
zfs_btree_hdr_t *child = node->btc_children[offset + 1];
return (zfs_btree_first_helper(tree, child, out_idx));
}
/*
* Return the next valued node in the tree. The same address can be safely
* passed for idx and out_idx.
*/
void *
zfs_btree_next(zfs_btree_t *tree, const zfs_btree_index_t *idx,
zfs_btree_index_t *out_idx)
{
return (zfs_btree_next_helper(tree, idx, out_idx, NULL));
}
/*
* Return the previous valued node in the tree. The same value can be safely
* passed for idx and out_idx.
*/
void *
zfs_btree_prev(zfs_btree_t *tree, const zfs_btree_index_t *idx,
zfs_btree_index_t *out_idx)
{
if (idx->bti_node == NULL) {
ASSERT3S(tree->bt_height, ==, -1);
return (NULL);
}
uint32_t offset = idx->bti_offset;
if (!zfs_btree_is_core(idx->bti_node)) {
/*
* When finding the previous element of an element in a leaf,
* there are two cases. If the element isn't the first one in
* the leaf, we just return the previous element in the leaf.
* Otherwise, we need to traverse up our parents until we find
* one where our ancestor isn't the first child of its parent.
* Once we do, the previous element is the separator before our
* ancestor in its parent.
*/
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
if (offset != 0) {
out_idx->bti_node = &leaf->btl_hdr;
out_idx->bti_offset = offset - 1;
out_idx->bti_before = B_FALSE;
return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
offset - 1) * tree->bt_elem_size);
}
zfs_btree_hdr_t *prev = &leaf->btl_hdr;
for (zfs_btree_core_t *node = leaf->btl_hdr.bth_parent;
node != NULL; node = node->btc_hdr.bth_parent) {
zfs_btree_hdr_t *hdr = &node->btc_hdr;
ASSERT(zfs_btree_is_core(hdr));
uint32_t i = zfs_btree_find_parent_idx(tree, prev);
if (i == 0) {
prev = hdr;
continue;
}
out_idx->bti_node = hdr;
out_idx->bti_offset = i - 1;
out_idx->bti_before = B_FALSE;
return (node->btc_elems + (i - 1) * tree->bt_elem_size);
}
/*
* We've traversed all the way up and been at the start of the
* node every time, so this was the first element in the tree.
*/
return (NULL);
}
/*
* The previous element from one in a core node is the last element in
* the subtree just to the left of the separator.
*/
ASSERT(zfs_btree_is_core(idx->bti_node));
zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
zfs_btree_hdr_t *child = node->btc_children[offset];
return (zfs_btree_last_helper(tree, child, out_idx));
}
/*
* Get the value at the provided index in the tree.
*
* Note that the value returned from this function can be mutated, but only
* if it will not change the ordering of the element with respect to any other
* elements that could be in the tree.
*/
void *
zfs_btree_get(zfs_btree_t *tree, zfs_btree_index_t *idx)
{
ASSERT(!idx->bti_before);
size_t size = tree->bt_elem_size;
if (!zfs_btree_is_core(idx->bti_node)) {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
idx->bti_offset) * size);
}
zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
return (node->btc_elems + idx->bti_offset * size);
}
/* Add the given value to the tree. Must not already be in the tree. */
void
zfs_btree_add(zfs_btree_t *tree, const void *node)
{
zfs_btree_index_t where = {0};
VERIFY3P(zfs_btree_find(tree, node, &where), ==, NULL);
zfs_btree_add_idx(tree, node, &where);
}
/* Helper function to free a tree node. */
static void
zfs_btree_node_destroy(zfs_btree_t *tree, zfs_btree_hdr_t *node)
{
tree->bt_num_nodes--;
if (!zfs_btree_is_core(node)) {
kmem_cache_free(zfs_btree_leaf_cache, node);
} else {
kmem_free(node, sizeof (zfs_btree_core_t) +
BTREE_CORE_ELEMS * tree->bt_elem_size);
}
}
/*
* Remove the rm_hdr and the separator to its left from the parent node. The
* buffer that rm_hdr was stored in may already be freed, so its contents
* cannot be accessed.
*/
static void
zfs_btree_remove_from_node(zfs_btree_t *tree, zfs_btree_core_t *node,
zfs_btree_hdr_t *rm_hdr)
{
size_t size = tree->bt_elem_size;
uint32_t min_count = (BTREE_CORE_ELEMS / 2) - 1;
zfs_btree_hdr_t *hdr = &node->btc_hdr;
/*
* If the node is the root node and rm_hdr is one of two children,
* promote the other child to the root.
*/
if (hdr->bth_parent == NULL && hdr->bth_count <= 1) {
ASSERT3U(hdr->bth_count, ==, 1);
ASSERT3P(tree->bt_root, ==, node);
ASSERT3P(node->btc_children[1], ==, rm_hdr);
tree->bt_root = node->btc_children[0];
node->btc_children[0]->bth_parent = NULL;
zfs_btree_node_destroy(tree, hdr);
tree->bt_height--;
return;
}
uint32_t idx;
for (idx = 0; idx <= hdr->bth_count; idx++) {
if (node->btc_children[idx] == rm_hdr)
break;
}
ASSERT3U(idx, <=, hdr->bth_count);
/*
* If the node is the root or it has more than the minimum number of
* children, just remove the child and separator, and return.
*/
if (hdr->bth_parent == NULL ||
hdr->bth_count > min_count) {
/*
* Shift the element and children to the right of rm_hdr to
* the left by one spot.
*/
bt_shift_core_left(tree, node, idx, hdr->bth_count - idx,
BSS_PARALLELOGRAM);
hdr->bth_count--;
zfs_btree_poison_node_at(tree, hdr, hdr->bth_count, 1);
return;
}
ASSERT3U(hdr->bth_count, ==, min_count);
/*
* Now we try to take a node from a neighbor. We check left, then
* right. If the neighbor exists and has more than the minimum number
* of elements, we move the separator between us and them to our
* node, move their closest element (last for left, first for right)
* to the separator, and move their closest child to our node. Along
* the way we need to collapse the gap made by idx, and (for our right
* neighbor) the gap made by removing their first element and child.
*
* Note: this logic currently doesn't support taking from a neighbor
* that isn't a sibling (i.e. a neighbor with a different
* parent). This isn't critical functionality, but may be worth
* implementing in the future for completeness' sake.
*/
zfs_btree_core_t *parent = hdr->bth_parent;
uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
zfs_btree_hdr_t *l_hdr = (parent_idx == 0 ? NULL :
parent->btc_children[parent_idx - 1]);
if (l_hdr != NULL && l_hdr->bth_count > min_count) {
/* We can take a node from the left neighbor. */
ASSERT(zfs_btree_is_core(l_hdr));
zfs_btree_core_t *neighbor = (zfs_btree_core_t *)l_hdr;
/*
* Start by shifting the elements and children in the current
* node to the right by one spot.
*/
bt_shift_core_right(tree, node, 0, idx - 1, BSS_TRAPEZOID);
/*
* Move the separator between node and neighbor to the first
* element slot in the current node.
*/
uint8_t *separator = parent->btc_elems + (parent_idx - 1) *
size;
bcpy(separator, node->btc_elems, size);
/* Move the last child of neighbor to our first child slot. */
node->btc_children[0] =
neighbor->btc_children[l_hdr->bth_count];
node->btc_children[0]->bth_parent = node;
/* Move the last element of neighbor to the separator spot. */
uint8_t *take_elem = neighbor->btc_elems +
(l_hdr->bth_count - 1) * size;
bcpy(take_elem, separator, size);
l_hdr->bth_count--;
zfs_btree_poison_node_at(tree, l_hdr, l_hdr->bth_count, 1);
return;
}
zfs_btree_hdr_t *r_hdr = (parent_idx == parent->btc_hdr.bth_count ?
NULL : parent->btc_children[parent_idx + 1]);
if (r_hdr != NULL && r_hdr->bth_count > min_count) {
/* We can take a node from the right neighbor. */
ASSERT(zfs_btree_is_core(r_hdr));
zfs_btree_core_t *neighbor = (zfs_btree_core_t *)r_hdr;
/*
* Shift elements in node left by one spot to overwrite rm_hdr
* and the separator before it.
*/
bt_shift_core_left(tree, node, idx, hdr->bth_count - idx,
BSS_PARALLELOGRAM);
/*
* Move the separator between node and neighbor to the last
* element spot in node.
*/
uint8_t *separator = parent->btc_elems + parent_idx * size;
bcpy(separator, node->btc_elems + (hdr->bth_count - 1) * size,
size);
/*
* Move the first child of neighbor to the last child spot in
* node.
*/
node->btc_children[hdr->bth_count] = neighbor->btc_children[0];
node->btc_children[hdr->bth_count]->bth_parent = node;
/* Move the first element of neighbor to the separator spot. */
uint8_t *take_elem = neighbor->btc_elems;
bcpy(take_elem, separator, size);
r_hdr->bth_count--;
/*
* Shift the elements and children of neighbor to cover the
* stolen elements.
*/
bt_shift_core_left(tree, neighbor, 1, r_hdr->bth_count,
BSS_TRAPEZOID);
zfs_btree_poison_node_at(tree, r_hdr, r_hdr->bth_count, 1);
return;
}
/*
* In this case, neither of our neighbors can spare an element, so we
* need to merge with one of them. We prefer the left one,
* arbitrarily. Move the separator into the leftmost merging node
* (which may be us or the left neighbor), and then move the right
* merging node's elements. Once that's done, we go back and delete
* the element we're removing. Finally, go into the parent and delete
* the right merging node and the separator. This may cause further
* merging.
*/
zfs_btree_hdr_t *new_rm_hdr, *keep_hdr;
uint32_t new_idx = idx;
if (l_hdr != NULL) {
keep_hdr = l_hdr;
new_rm_hdr = hdr;
new_idx += keep_hdr->bth_count + 1;
} else {
ASSERT3P(r_hdr, !=, NULL);
keep_hdr = hdr;
new_rm_hdr = r_hdr;
parent_idx++;
}
ASSERT(zfs_btree_is_core(keep_hdr));
ASSERT(zfs_btree_is_core(new_rm_hdr));
zfs_btree_core_t *keep = (zfs_btree_core_t *)keep_hdr;
zfs_btree_core_t *rm = (zfs_btree_core_t *)new_rm_hdr;
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < new_rm_hdr->bth_count + 1; i++) {
zfs_btree_verify_poison_at(tree, keep_hdr,
keep_hdr->bth_count + i);
}
}
/* Move the separator into the left node. */
uint8_t *e_out = keep->btc_elems + keep_hdr->bth_count * size;
uint8_t *separator = parent->btc_elems + (parent_idx - 1) *
size;
bcpy(separator, e_out, size);
keep_hdr->bth_count++;
/* Move all our elements and children into the left node. */
bt_transfer_core(tree, rm, 0, new_rm_hdr->bth_count, keep,
keep_hdr->bth_count, BSS_TRAPEZOID);
uint32_t old_count = keep_hdr->bth_count;
/* Update bookkeeping */
keep_hdr->bth_count += new_rm_hdr->bth_count;
ASSERT3U(keep_hdr->bth_count, ==, (min_count * 2) + 1);
/*
* Shift the element and children to the right of rm_hdr to
* the left by one spot.
*/
ASSERT3P(keep->btc_children[new_idx], ==, rm_hdr);
bt_shift_core_left(tree, keep, new_idx, keep_hdr->bth_count - new_idx,
BSS_PARALLELOGRAM);
keep_hdr->bth_count--;
/* Reparent all our children to point to the left node. */
zfs_btree_hdr_t **new_start = keep->btc_children +
old_count - 1;
for (uint32_t i = 0; i < new_rm_hdr->bth_count + 1; i++)
new_start[i]->bth_parent = keep;
for (uint32_t i = 0; i <= keep_hdr->bth_count; i++) {
ASSERT3P(keep->btc_children[i]->bth_parent, ==, keep);
ASSERT3P(keep->btc_children[i], !=, rm_hdr);
}
zfs_btree_poison_node_at(tree, keep_hdr, keep_hdr->bth_count, 1);
new_rm_hdr->bth_count = 0;
- zfs_btree_node_destroy(tree, new_rm_hdr);
zfs_btree_remove_from_node(tree, parent, new_rm_hdr);
+ zfs_btree_node_destroy(tree, new_rm_hdr);
}
/* Remove the element at the specific location. */
void
zfs_btree_remove_idx(zfs_btree_t *tree, zfs_btree_index_t *where)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *hdr = where->bti_node;
uint32_t idx = where->bti_offset;
ASSERT(!where->bti_before);
if (tree->bt_bulk != NULL) {
/*
* Leave bulk insert mode. Note that our index would be
* invalid after we correct the tree, so we copy the value
* we're planning to remove and find it again after
* bulk_finish.
*/
uint8_t *value = zfs_btree_get(tree, where);
uint8_t *tmp = kmem_alloc(size, KM_SLEEP);
bcpy(value, tmp, size);
zfs_btree_bulk_finish(tree);
VERIFY3P(zfs_btree_find(tree, tmp, where), !=, NULL);
kmem_free(tmp, size);
hdr = where->bti_node;
idx = where->bti_offset;
}
tree->bt_num_elems--;
/*
* If the element happens to be in a core node, we move a leaf node's
* element into its place and then remove the leaf node element. This
* makes the rebalance logic not need to be recursive both upwards and
* downwards.
*/
if (zfs_btree_is_core(hdr)) {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
zfs_btree_hdr_t *left_subtree = node->btc_children[idx];
void *new_value = zfs_btree_last_helper(tree, left_subtree,
where);
ASSERT3P(new_value, !=, NULL);
bcpy(new_value, node->btc_elems + idx * size, size);
hdr = where->bti_node;
idx = where->bti_offset;
ASSERT(!where->bti_before);
}
/*
* First, we'll update the leaf's metadata. Then, we shift any
* elements after the idx to the left. After that, we rebalance if
* needed.
*/
ASSERT(!zfs_btree_is_core(hdr));
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
ASSERT3U(hdr->bth_count, >, 0);
uint32_t min_count = (tree->bt_leaf_cap / 2) - 1;
/*
* If we're over the minimum size or this is the root, just overwrite
* the value and return.
*/
if (hdr->bth_count > min_count || hdr->bth_parent == NULL) {
bt_shrink_leaf(tree, leaf, idx, 1);
if (hdr->bth_parent == NULL) {
ASSERT0(tree->bt_height);
if (hdr->bth_count == 0) {
tree->bt_root = NULL;
tree->bt_height--;
zfs_btree_node_destroy(tree, &leaf->btl_hdr);
}
}
zfs_btree_verify(tree);
return;
}
ASSERT3U(hdr->bth_count, ==, min_count);
/*
* Now we try to take a node from a sibling. We check left, then
* right. If they exist and have more than the minimum number of
* elements, we move the separator between us and them to our node
* and move their closest element (last for left, first for right) to
* the separator. Along the way we need to collapse the gap made by
* idx, and (for our right neighbor) the gap made by removing their
* first element.
*
* Note: this logic currently doesn't support taking from a neighbor
* that isn't a sibling. This isn't critical functionality, but may be
* worth implementing in the future for completeness' sake.
*/
zfs_btree_core_t *parent = hdr->bth_parent;
uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
zfs_btree_hdr_t *l_hdr = (parent_idx == 0 ? NULL :
parent->btc_children[parent_idx - 1]);
if (l_hdr != NULL && l_hdr->bth_count > min_count) {
/* We can take a node from the left neighbor. */
ASSERT(!zfs_btree_is_core(l_hdr));
zfs_btree_leaf_t *neighbor = (zfs_btree_leaf_t *)l_hdr;
/*
* Move our elements back by one spot to make room for the
* stolen element and overwrite the element being removed.
*/
bt_shift_leaf(tree, leaf, 0, idx, 1, BSD_RIGHT);
/* Move the separator to our first spot. */
uint8_t *separator = parent->btc_elems + (parent_idx - 1) *
size;
bcpy(separator, leaf->btl_elems + hdr->bth_first * size, size);
/* Move our neighbor's last element to the separator. */
uint8_t *take_elem = neighbor->btl_elems +
(l_hdr->bth_first + l_hdr->bth_count - 1) * size;
bcpy(take_elem, separator, size);
/* Delete our neighbor's last element. */
bt_shrink_leaf(tree, neighbor, l_hdr->bth_count - 1, 1);
zfs_btree_verify(tree);
return;
}
zfs_btree_hdr_t *r_hdr = (parent_idx == parent->btc_hdr.bth_count ?
NULL : parent->btc_children[parent_idx + 1]);
if (r_hdr != NULL && r_hdr->bth_count > min_count) {
/* We can take a node from the right neighbor. */
ASSERT(!zfs_btree_is_core(r_hdr));
zfs_btree_leaf_t *neighbor = (zfs_btree_leaf_t *)r_hdr;
/*
* Move our elements after the element being removed forwards
* by one spot to make room for the stolen element and
* overwrite the element being removed.
*/
bt_shift_leaf(tree, leaf, idx + 1, hdr->bth_count - idx - 1,
1, BSD_LEFT);
/* Move the separator between us to our last spot. */
uint8_t *separator = parent->btc_elems + parent_idx * size;
bcpy(separator, leaf->btl_elems + (hdr->bth_first +
hdr->bth_count - 1) * size, size);
/* Move our neighbor's first element to the separator. */
uint8_t *take_elem = neighbor->btl_elems +
r_hdr->bth_first * size;
bcpy(take_elem, separator, size);
/* Delete our neighbor's first element. */
bt_shrink_leaf(tree, neighbor, 0, 1);
zfs_btree_verify(tree);
return;
}
/*
* In this case, neither of our neighbors can spare an element, so we
* need to merge with one of them. We prefer the left one, arbitrarily.
* After the removal, we move the separator into the leftmost merging node
* (which may be us or the left neighbor), and then move the right
* merging node's elements. Once that's done, we go back and delete
* the element we're removing. Finally, go into the parent and delete
* the right merging node and the separator. This may cause further
* merging.
*/
zfs_btree_hdr_t *rm_hdr, *k_hdr;
if (l_hdr != NULL) {
k_hdr = l_hdr;
rm_hdr = hdr;
} else {
ASSERT3P(r_hdr, !=, NULL);
k_hdr = hdr;
rm_hdr = r_hdr;
parent_idx++;
}
ASSERT(!zfs_btree_is_core(k_hdr));
ASSERT(!zfs_btree_is_core(rm_hdr));
ASSERT3U(k_hdr->bth_count, ==, min_count);
ASSERT3U(rm_hdr->bth_count, ==, min_count);
zfs_btree_leaf_t *keep = (zfs_btree_leaf_t *)k_hdr;
zfs_btree_leaf_t *rm = (zfs_btree_leaf_t *)rm_hdr;
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < rm_hdr->bth_count + 1; i++) {
zfs_btree_verify_poison_at(tree, k_hdr,
k_hdr->bth_count + i);
}
}
/*
* Remove the value from the node. It will go below the minimum,
* but we'll fix it in no time.
*/
bt_shrink_leaf(tree, leaf, idx, 1);
/* Prepare space for elements to be moved from the right. */
uint32_t k_count = k_hdr->bth_count;
bt_grow_leaf(tree, keep, k_count, 1 + rm_hdr->bth_count);
ASSERT3U(k_hdr->bth_count, ==, min_count * 2);
/* Move the separator into the first open spot. */
uint8_t *out = keep->btl_elems + (k_hdr->bth_first + k_count) * size;
uint8_t *separator = parent->btc_elems + (parent_idx - 1) * size;
bcpy(separator, out, size);
/* Move our elements to the left neighbor. */
bt_transfer_leaf(tree, rm, 0, rm_hdr->bth_count, keep, k_count + 1);
- zfs_btree_node_destroy(tree, rm_hdr);
/* Remove the emptied node from the parent. */
zfs_btree_remove_from_node(tree, parent, rm_hdr);
+ zfs_btree_node_destroy(tree, rm_hdr);
zfs_btree_verify(tree);
}
/* Remove the given value from the tree. */
void
zfs_btree_remove(zfs_btree_t *tree, const void *value)
{
zfs_btree_index_t where = {0};
VERIFY3P(zfs_btree_find(tree, value, &where), !=, NULL);
zfs_btree_remove_idx(tree, &where);
}
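/*
 * A minimal usage sketch (not from the source) of the basic interface above,
 * assuming a tree of uint64_t keys that was set up elsewhere with a suitable
 * comparator; the variable names are purely illustrative:
 *
 *	uint64_t key = 42;
 *	zfs_btree_index_t where;
 *
 *	zfs_btree_add(tree, &key);
 *	if (zfs_btree_find(tree, &key, &where) != NULL)
 *		zfs_btree_remove(tree, &key);
 */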
/* Return the number of elements in the tree. */
ulong_t
zfs_btree_numnodes(zfs_btree_t *tree)
{
return (tree->bt_num_elems);
}
/*
* This function is used to visit all the elements in the tree before
* destroying the tree. This allows the calling code to perform any cleanup it
* needs to do. This is more efficient than just removing the first element
* over and over, because it avoids all the rebalancing work. Once the
* destroy_nodes() function has been called, no other btree operations are
* valid until it returns NULL, at which point the only valid operation is
* zfs_btree_destroy().
*
* example:
*
* zfs_btree_index_t *cookie = NULL;
* my_data_t *node;
*
* while ((node = zfs_btree_destroy_nodes(tree, &cookie)) != NULL)
* free(node->ptr);
* zfs_btree_destroy(tree);
*
*
*/
void *
zfs_btree_destroy_nodes(zfs_btree_t *tree, zfs_btree_index_t **cookie)
{
if (*cookie == NULL) {
if (tree->bt_height == -1)
return (NULL);
*cookie = kmem_alloc(sizeof (**cookie), KM_SLEEP);
return (zfs_btree_first(tree, *cookie));
}
void *rval = zfs_btree_next_helper(tree, *cookie, *cookie,
zfs_btree_node_destroy);
if (rval == NULL) {
tree->bt_root = NULL;
tree->bt_height = -1;
tree->bt_num_elems = 0;
kmem_free(*cookie, sizeof (**cookie));
tree->bt_bulk = NULL;
}
return (rval);
}
static void
zfs_btree_clear_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
if (zfs_btree_is_core(hdr)) {
zfs_btree_core_t *btc = (zfs_btree_core_t *)hdr;
for (uint32_t i = 0; i <= hdr->bth_count; i++)
zfs_btree_clear_helper(tree, btc->btc_children[i]);
}
zfs_btree_node_destroy(tree, hdr);
}
void
zfs_btree_clear(zfs_btree_t *tree)
{
if (tree->bt_root == NULL) {
ASSERT0(tree->bt_num_elems);
return;
}
zfs_btree_clear_helper(tree, tree->bt_root);
tree->bt_num_elems = 0;
tree->bt_root = NULL;
tree->bt_num_nodes = 0;
tree->bt_height = -1;
tree->bt_bulk = NULL;
}
void
zfs_btree_destroy(zfs_btree_t *tree)
{
ASSERT0(tree->bt_num_elems);
ASSERT3P(tree->bt_root, ==, NULL);
}
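/*
 * An illustrative teardown sketch (hypothetical caller): for a tree whose
 * elements need no per-element cleanup, zfs_btree_clear() frees every node
 * and resets the element and node counts, after which zfs_btree_destroy()
 * is valid:
 *
 *	zfs_btree_clear(tree);
 *	zfs_btree_destroy(tree);
 *
 * Callers that do need per-element cleanup should use the
 * zfs_btree_destroy_nodes() loop shown in the example above instead.
 */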
/* Verify that every child of this node has the correct parent pointer. */
static void
zfs_btree_verify_pointers_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
if (!zfs_btree_is_core(hdr))
return;
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
VERIFY3P(node->btc_children[i]->bth_parent, ==, hdr);
zfs_btree_verify_pointers_helper(tree, node->btc_children[i]);
}
}
/* Verify that every node has the correct parent pointer. */
static void
zfs_btree_verify_pointers(zfs_btree_t *tree)
{
if (tree->bt_height == -1) {
VERIFY3P(tree->bt_root, ==, NULL);
return;
}
VERIFY3P(tree->bt_root->bth_parent, ==, NULL);
zfs_btree_verify_pointers_helper(tree, tree->bt_root);
}
/*
* Verify that the current node and all its children satisfy the count
* invariants, and return the total count in the subtree rooted in this node.
*/
static uint64_t
zfs_btree_verify_counts_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
if (!zfs_btree_is_core(hdr)) {
if (tree->bt_root != hdr && tree->bt_bulk &&
hdr != &tree->bt_bulk->btl_hdr) {
VERIFY3U(hdr->bth_count, >=, tree->bt_leaf_cap / 2 - 1);
}
return (hdr->bth_count);
} else {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
uint64_t ret = hdr->bth_count;
if (tree->bt_root != hdr && tree->bt_bulk == NULL)
VERIFY3P(hdr->bth_count, >=, BTREE_CORE_ELEMS / 2 - 1);
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
ret += zfs_btree_verify_counts_helper(tree,
node->btc_children[i]);
}
return (ret);
}
}
/*
* Verify that all nodes satisfy the invariants and that the total number of
* elements is correct.
*/
static void
zfs_btree_verify_counts(zfs_btree_t *tree)
{
EQUIV(tree->bt_num_elems == 0, tree->bt_height == -1);
if (tree->bt_height == -1) {
return;
}
VERIFY3P(zfs_btree_verify_counts_helper(tree, tree->bt_root), ==,
tree->bt_num_elems);
}
/*
* Check that the subtree rooted at this node has a uniform height. Returns
* the number of nodes under this node, to help verify bt_num_nodes.
*/
static uint64_t
zfs_btree_verify_height_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
int64_t height)
{
if (!zfs_btree_is_core(hdr)) {
VERIFY0(height);
return (1);
}
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
uint64_t ret = 1;
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
ret += zfs_btree_verify_height_helper(tree,
node->btc_children[i], height - 1);
}
return (ret);
}
/*
* Check that the tree rooted at this node has a uniform height, and that the
* bt_height in the tree is correct.
*/
static void
zfs_btree_verify_height(zfs_btree_t *tree)
{
EQUIV(tree->bt_height == -1, tree->bt_root == NULL);
if (tree->bt_height == -1) {
return;
}
VERIFY3U(zfs_btree_verify_height_helper(tree, tree->bt_root,
tree->bt_height), ==, tree->bt_num_nodes);
}
/*
* Check that the elements in this node are sorted, and that if this is a core
* node, the separators are properly between the subtrees they separate and
* that the children also satisfy this requirement.
*/
static void
zfs_btree_verify_order_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
size_t size = tree->bt_elem_size;
if (!zfs_btree_is_core(hdr)) {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
for (uint32_t i = 1; i < hdr->bth_count; i++) {
VERIFY3S(tree->bt_compar(leaf->btl_elems +
(hdr->bth_first + i - 1) * size,
leaf->btl_elems +
(hdr->bth_first + i) * size), ==, -1);
}
return;
}
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = 1; i < hdr->bth_count; i++) {
VERIFY3S(tree->bt_compar(node->btc_elems + (i - 1) * size,
node->btc_elems + i * size), ==, -1);
}
for (uint32_t i = 0; i < hdr->bth_count; i++) {
uint8_t *left_child_last = NULL;
zfs_btree_hdr_t *left_child_hdr = node->btc_children[i];
if (zfs_btree_is_core(left_child_hdr)) {
zfs_btree_core_t *left_child =
(zfs_btree_core_t *)left_child_hdr;
left_child_last = left_child->btc_elems +
(left_child_hdr->bth_count - 1) * size;
} else {
zfs_btree_leaf_t *left_child =
(zfs_btree_leaf_t *)left_child_hdr;
left_child_last = left_child->btl_elems +
(left_child_hdr->bth_first +
left_child_hdr->bth_count - 1) * size;
}
int comp = tree->bt_compar(node->btc_elems + i * size,
left_child_last);
if (comp <= 0) {
panic("btree: compar returned %d (expected 1) at "
"%px %d: compar(%px, %px)", comp, node, i,
node->btc_elems + i * size, left_child_last);
}
uint8_t *right_child_first = NULL;
zfs_btree_hdr_t *right_child_hdr = node->btc_children[i + 1];
if (zfs_btree_is_core(right_child_hdr)) {
zfs_btree_core_t *right_child =
(zfs_btree_core_t *)right_child_hdr;
right_child_first = right_child->btc_elems;
} else {
zfs_btree_leaf_t *right_child =
(zfs_btree_leaf_t *)right_child_hdr;
right_child_first = right_child->btl_elems +
right_child_hdr->bth_first * size;
}
comp = tree->bt_compar(node->btc_elems + i * size,
right_child_first);
if (comp >= 0) {
panic("btree: compar returned %d (expected -1) at "
"%px %d: compar(%px, %px)", comp, node, i,
node->btc_elems + i * size, right_child_first);
}
}
for (uint32_t i = 0; i <= hdr->bth_count; i++)
zfs_btree_verify_order_helper(tree, node->btc_children[i]);
}
/* Check that all elements in the tree are in sorted order. */
static void
zfs_btree_verify_order(zfs_btree_t *tree)
{
EQUIV(tree->bt_height == -1, tree->bt_root == NULL);
if (tree->bt_height == -1) {
return;
}
zfs_btree_verify_order_helper(tree, tree->bt_root);
}
#ifdef ZFS_DEBUG
/* Check that all unused memory is poisoned correctly. */
static void
zfs_btree_verify_poison_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
size_t size = tree->bt_elem_size;
if (!zfs_btree_is_core(hdr)) {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
for (size_t i = 0; i < hdr->bth_first * size; i++)
VERIFY3U(leaf->btl_elems[i], ==, 0x0f);
for (size_t i = (hdr->bth_first + hdr->bth_count) * size;
i < BTREE_LEAF_ESIZE; i++)
VERIFY3U(leaf->btl_elems[i], ==, 0x0f);
} else {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (size_t i = hdr->bth_count * size;
i < BTREE_CORE_ELEMS * size; i++)
VERIFY3U(node->btc_elems[i], ==, 0x0f);
for (uint32_t i = hdr->bth_count + 1; i <= BTREE_CORE_ELEMS;
i++) {
VERIFY3P(node->btc_children[i], ==,
(zfs_btree_hdr_t *)BTREE_POISON);
}
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
zfs_btree_verify_poison_helper(tree,
node->btc_children[i]);
}
}
}
#endif
/* Check that unused memory in the tree is still poisoned. */
static void
zfs_btree_verify_poison(zfs_btree_t *tree)
{
#ifdef ZFS_DEBUG
if (tree->bt_height == -1)
return;
zfs_btree_verify_poison_helper(tree, tree->bt_root);
#endif
}
void
zfs_btree_verify(zfs_btree_t *tree)
{
if (zfs_btree_verify_intensity == 0)
return;
zfs_btree_verify_height(tree);
if (zfs_btree_verify_intensity == 1)
return;
zfs_btree_verify_pointers(tree);
if (zfs_btree_verify_intensity == 2)
return;
zfs_btree_verify_counts(tree);
if (zfs_btree_verify_intensity == 3)
return;
zfs_btree_verify_order(tree);
if (zfs_btree_verify_intensity == 4)
return;
zfs_btree_verify_poison(tree);
}
+
+/* BEGIN CSTYLED */
+ZFS_MODULE_PARAM(zfs, zfs_, btree_verify_intensity, UINT, ZMOD_RW,
+ "Enable btree verification. Levels above 4 require ZFS be built "
+ "with debugging");
+/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index b2d1b9568786..80cab8177bc6 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -1,5115 +1,5119 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>
static kstat_t *dbuf_ksp;
typedef struct dbuf_stats {
/*
* Various statistics about the size of the dbuf cache.
*/
kstat_named_t cache_count;
kstat_named_t cache_size_bytes;
kstat_named_t cache_size_bytes_max;
/*
* Statistics regarding the bounds on the dbuf cache size.
*/
kstat_named_t cache_target_bytes;
kstat_named_t cache_lowater_bytes;
kstat_named_t cache_hiwater_bytes;
/*
* Total number of dbuf cache evictions that have occurred.
*/
kstat_named_t cache_total_evicts;
/*
* The distribution of dbuf levels in the dbuf cache and
* the total size of all dbufs at each level.
*/
kstat_named_t cache_levels[DN_MAX_LEVELS];
kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
/*
* Statistics about the dbuf hash table.
*/
kstat_named_t hash_hits;
kstat_named_t hash_misses;
kstat_named_t hash_collisions;
kstat_named_t hash_elements;
kstat_named_t hash_elements_max;
/*
* Number of sublists containing more than one dbuf in the dbuf
* hash table. Keep track of the longest hash chain.
*/
kstat_named_t hash_chains;
kstat_named_t hash_chain_max;
/*
* Number of times a dbuf_create() discovers that a dbuf was
* already created and in the dbuf hash table.
*/
kstat_named_t hash_insert_race;
/*
* Statistics about the size of the metadata dbuf cache.
*/
kstat_named_t metadata_cache_count;
kstat_named_t metadata_cache_size_bytes;
kstat_named_t metadata_cache_size_bytes_max;
/*
* For diagnostic purposes, this is incremented whenever we can't add
* something to the metadata cache because it's full, and instead put
* the data in the regular dbuf cache.
*/
kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;
dbuf_stats_t dbuf_stats = {
{ "cache_count", KSTAT_DATA_UINT64 },
{ "cache_size_bytes", KSTAT_DATA_UINT64 },
{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "cache_target_bytes", KSTAT_DATA_UINT64 },
{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
{ "cache_total_evicts", KSTAT_DATA_UINT64 },
{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
{ "hash_hits", KSTAT_DATA_UINT64 },
{ "hash_misses", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "hash_insert_race", KSTAT_DATA_UINT64 },
{ "metadata_cache_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t cache_count;
wmsum_t cache_total_evicts;
wmsum_t cache_levels[DN_MAX_LEVELS];
wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
wmsum_t hash_hits;
wmsum_t hash_misses;
wmsum_t hash_collisions;
wmsum_t hash_chains;
wmsum_t hash_insert_race;
wmsum_t metadata_cache_count;
wmsum_t metadata_cache_overflow;
} dbuf_sums;
#define DBUF_STAT_INCR(stat, val) \
wmsum_add(&dbuf_sums.stat, val);
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val));
#define DBUF_STAT_BUMP(stat) \
DBUF_STAT_INCR(stat, 1);
#define DBUF_STAT_BUMPDOWN(stat) \
DBUF_STAT_INCR(stat, -1);
#define DBUF_STAT_MAX(stat, v) { \
uint64_t _m; \
while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
(_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
continue; \
}
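/*
 * DBUF_STAT_MAX above maintains a running maximum without taking a lock: it
 * rereads the currently stored value and retries the atomic_cas_64() until
 * either the stored maximum is already >= v or the compare-and-swap succeeds.
 */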
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
/*
* Global data structures and functions for the dbuf cache.
*/
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;
static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;
/*
* There are two dbuf caches; each dbuf can only be in one of them at a time.
*
* 1. Cache of metadata dbufs, to help make read-heavy administrative commands
* from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
* that represent the metadata that describes filesystems/snapshots/
* bookmarks/properties/etc. We only evict from this cache when we export a
* pool, to short-circuit as much I/O as possible for all administrative
* commands that need the metadata. There is no eviction policy for this
* cache, because we try to only include types in it which would occupy a
* very small amount of space per object but create a large impact on the
* performance of these commands. Instead, after it reaches a maximum size
* (which should only happen on very small memory systems with a very large
* number of filesystem objects), we stop taking new dbufs into the
* metadata cache, instead putting them in the normal dbuf cache.
*
* 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
* are not currently held but have been recently released. These dbufs
* are not eligible for arc eviction until they are aged out of the cache.
* Dbufs that are aged out of the cache will be immediately destroyed and
* become eligible for arc eviction.
*
* Dbufs are added to these caches once the last hold is released. If a dbuf is
* later accessed and still exists in the dbuf cache, then it will be removed
* from the cache and later re-added to the head of the cache.
*
* If a given dbuf meets the requirements for the metadata cache, it will go
* there, otherwise it will be considered for the generic LRU dbuf cache. The
* caches and the refcounts tracking their sizes are stored in an array indexed
* by those caches' matching enum values (from dbuf_cached_state_t).
*/
typedef struct dbuf_cache {
multilist_t cache;
zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
/* Size limits for the caches */
static unsigned long dbuf_cache_max_bytes = ULONG_MAX;
static unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
/* Set the default sizes of the caches to log2 fraction of arc size */
static int dbuf_cache_shift = 5;
static int dbuf_metadata_cache_shift = 6;
static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);
/*
* The LRU dbuf cache uses a three-stage eviction policy:
* - A low water marker designates when the dbuf eviction thread
* should stop evicting from the dbuf cache.
* - When we reach the maximum size (aka mid water mark), we
* signal the eviction thread to run.
* - The high water mark indicates when the eviction thread
* is unable to keep up with the incoming load and eviction must
* happen in the context of the calling thread.
*
* The dbuf cache:
* (max size)
* low water mid water hi water
* +----------------------------------------+----------+----------+
* | | | |
* | | | |
* | | | |
* | | | |
* +----------------------------------------+----------+----------+
* stop signal evict
* evicting eviction directly
* thread
*
* The high and low water marks indicate the operating range for the eviction
* thread. The low water mark is, by default, 90% of the total size of the
* cache and the high water mark is at 110% (both of these percentages can be
* changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
* respectively). The eviction thread will try to ensure that the cache remains
* within this range by waking up every second and checking if the cache is
* above the low water mark. The thread can also be woken up by callers adding
* elements into the cache if the cache is larger than the mid water (i.e. max
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
* water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
/*
* The percentage above and below the maximum cache size.
*/
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
memset(db, 0, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
zfs_refcount_create(&db->db_holds);
return (0);
}
static void
dbuf_dest(void *vdb, void *unused)
{
(void) unused;
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
rw_destroy(&db->db_rwlock);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
zfs_refcount_destroy(&db->db_holds);
}
/*
* dbuf hash table routines
*/
static dbuf_hash_table_t dbuf_hash_table;
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
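/*
 * The 64-bit hash above covers the (objset, object, level, blkid) tuple that
 * identifies a dbuf; callers below reduce it to a table index by masking with
 * hash_table_mask, which relies on the table size being a power of two.
 */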
#define DTRACE_SET_STATE(db, why) \
DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
const char *, why)
#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
((dbuf)->db.db_object == (obj) && \
(dbuf)->db_objset == (os) && \
(dbuf)->db_level == (level) && \
(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv;
uint64_t idx;
dmu_buf_impl_t *db;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
rw_enter(DBUF_HASH_RWLOCK(h, idx), RW_READER);
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING) {
rw_exit(DBUF_HASH_RWLOCK(h, idx));
return (db);
}
mutex_exit(&db->db_mtx);
}
}
rw_exit(DBUF_HASH_RWLOCK(h, idx));
return (NULL);
}
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
dnode_t *dn;
dmu_buf_impl_t *db = NULL;
if (dnode_hold(os, object, FTAG, &dn) == 0) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus != NULL) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
return (db);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
*/
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
uint64_t blkid, hv, idx;
dmu_buf_impl_t *dbf;
uint32_t i;
blkid = db->db_blkid;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
rw_enter(DBUF_HASH_RWLOCK(h, idx), RW_WRITER);
for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
dbf = dbf->db_hash_next, i++) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
mutex_enter(&dbf->db_mtx);
if (dbf->db_state != DB_EVICTING) {
rw_exit(DBUF_HASH_RWLOCK(h, idx));
return (dbf);
}
mutex_exit(&dbf->db_mtx);
}
}
if (i > 0) {
DBUF_STAT_BUMP(hash_collisions);
if (i == 1)
DBUF_STAT_BUMP(hash_chains);
DBUF_STAT_MAX(hash_chain_max, i);
}
mutex_enter(&db->db_mtx);
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
rw_exit(DBUF_HASH_RWLOCK(h, idx));
uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
DBUF_STAT_MAX(hash_elements_max, he);
return (NULL);
}
/*
* This returns whether this dbuf should be stored in the metadata cache, which
* is based on whether it's from one of the dnode types that store data related
* to traversing dataset hierarchies.
*/
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
DB_DNODE_ENTER(db);
dmu_object_type_t type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
/* Check if this dbuf is one of the types we care about */
if (DMU_OT_IS_METADATA_CACHED(type)) {
/* If we hit this, then we set something up wrong in dmu_ot */
ASSERT(DMU_OT_IS_METADATA(type));
/*
* Sanity check for small-memory systems: don't allocate too
* much memory for this purpose.
*/
if (zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
dbuf_metadata_cache_target_bytes()) {
DBUF_STAT_BUMP(metadata_cache_overflow);
return (B_FALSE);
}
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Remove an entry from the hash table. It must be in the EVICTING state.
*/
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv, idx;
dmu_buf_impl_t *dbf, **dbp;
hv = dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid);
idx = hv & h->hash_table_mask;
/*
* We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_RWLOCK > db_mtx.
*/
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_state == DB_EVICTING);
ASSERT(!MUTEX_HELD(&db->db_mtx));
rw_enter(DBUF_HASH_RWLOCK(h, idx), RW_WRITER);
dbp = &h->hash_table[idx];
while ((dbf = *dbp) != db) {
dbp = &dbf->db_hash_next;
ASSERT(dbf != NULL);
}
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
if (h->hash_table[idx] &&
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
rw_exit(DBUF_HASH_RWLOCK(h, idx));
atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}
typedef enum {
DBVU_EVICTING,
DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
int64_t holds;
if (db->db_user == NULL)
return;
/* Only data blocks support the attachment of user data. */
ASSERT(db->db_level == 0);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
ASSERT3U(db->db_state, ==, DB_CACHED);
holds = zfs_refcount_count(&db->db_holds);
if (verify_type == DBVU_EVICTING) {
/*
* Immediate eviction occurs when holds == dirtycnt.
* For normal eviction buffers, holds is zero on
* eviction, except when dbuf_fix_old_data() calls
* dbuf_clear_data(). However, the hold count can grow
* during eviction even though db_mtx is held (see
* dmu_bonus_hold() for an example), so we can only
* test the generic invariant that holds >= dirtycnt.
*/
ASSERT3U(holds, >=, db->db_dirtycnt);
} else {
if (db->db_user_immediate_evict == TRUE)
ASSERT3U(holds, >=, db->db_dirtycnt);
else
ASSERT3U(holds, >, 0);
}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (dbu == NULL)
return;
dbuf_verify_user(db, DBVU_EVICTING);
db->db_user = NULL;
#ifdef ZFS_DEBUG
if (dbu->dbu_clear_on_evict_dbufp != NULL)
*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif
/*
* There are two eviction callbacks - one that we call synchronously
* and one that we invoke via a taskq. The async one is useful for
* avoiding lock order reversals and limiting stack depth.
*
* Note that if we have a sync callback but no async callback,
* it's likely that the sync callback will free the structure
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
if (has_async) {
taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
dbu, 0, &dbu->dbu_tqent);
}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
/*
* Consider indirect blocks and spill blocks to be meta data.
*/
if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
return (B_TRUE);
} else {
boolean_t is_metadata;
DB_DNODE_ENTER(db);
is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
DB_DNODE_EXIT(db);
return (is_metadata);
}
}
/*
* We want to exclude buffers that are on a special allocation class from
* L2ARC.
*/
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
vdev_t *vd = NULL;
zfs_cache_type_t cache = db->db_objset->os_secondary_cache;
blkptr_t *bp = db->db_blkptr;
if (bp != NULL && !BP_IS_HOLE(bp)) {
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (cache == ZFS_CACHE_ALL ||
(dbuf_is_metadata(db) && cache == ZFS_CACHE_METADATA)) {
if (vd == NULL)
return (B_TRUE);
if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
l2arc_exclude_special == 0)
return (B_TRUE);
}
}
return (B_FALSE);
}
static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
vdev_t *vd = NULL;
zfs_cache_type_t cache = dn->dn_objset->os_secondary_cache;
if (bp != NULL && !BP_IS_HOLE(bp)) {
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (cache == ZFS_CACHE_ALL || ((level > 0 ||
DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)) &&
cache == ZFS_CACHE_METADATA)) {
if (vd == NULL)
return (B_TRUE);
if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
l2arc_exclude_special == 0)
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the dbuf eviction
* code is laid out; dbuf_evict_thread() assumes dbufs are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;
/*
* The assumption here is that the hash value for a given
* dmu_buf_impl_t will remain constant throughout its lifetime
* (i.e. its objset, object, level and blkid fields don't change).
* Thus, we don't need to store the dbuf's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid) %
multilist_get_num_sublists(ml));
}
/*
* The target size of the dbuf cache can grow with the ARC target,
* unless limited by the tunable dbuf_cache_max_bytes.
*/
static inline unsigned long
dbuf_cache_target_bytes(void)
{
return (MIN(dbuf_cache_max_bytes,
arc_target_bytes() >> dbuf_cache_shift));
}
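/*
 * Illustrative numbers (not from the source): with the default
 * dbuf_cache_shift of 5 the dbuf cache targets 1/32 of the ARC target,
 * e.g. roughly 128MB for a 4GB ARC target, unless capped by
 * dbuf_cache_max_bytes.
 */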
/*
* The target size of the dbuf metadata cache can grow with the ARC target,
* unless limited by the tunable dbuf_metadata_cache_max_bytes.
*/
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
return (MIN(dbuf_metadata_cache_max_bytes,
arc_target_bytes() >> dbuf_metadata_cache_shift));
}
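/*
 * Likewise, with the default dbuf_metadata_cache_shift of 6 the metadata
 * cache targets 1/64 of the ARC target (roughly 64MB for the same 4GB
 * example), unless capped by dbuf_metadata_cache_max_bytes.
 */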
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target +
(dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}
static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target -
(dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
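/*
 * Continuing the illustrative 128MB target above with the default 10%
 * dbuf_cache_hiwater_pct and dbuf_cache_lowater_pct: the eviction thread
 * stops evicting at about 115MB (low water), and callers begin evicting
 * directly once the cache exceeds about 141MB (high water).
 */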
static inline boolean_t
dbuf_cache_above_lowater(void)
{
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
dbuf_cache_lowater_bytes());
}
/*
* Evict the oldest eligible dbuf from the dbuf cache.
*/
static void
dbuf_evict_one(void)
{
int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
multilist_sublist_t *mls = multilist_sublist_lock(
&dbuf_caches[DB_DBUF_CACHE].cache, idx);
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
dmu_buf_impl_t *db = multilist_sublist_tail(mls);
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
}
DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
multilist_sublist_t *, mls);
if (db != NULL) {
multilist_sublist_remove(mls, db);
multilist_sublist_unlock(mls);
(void) zfs_refcount_remove_many(
&dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
db->db_caching_status = DB_NO_CACHE;
dbuf_destroy(db);
DBUF_STAT_BUMP(cache_total_evicts);
} else {
multilist_sublist_unlock(mls);
}
}
/*
* The dbuf evict thread is responsible for aging out dbufs from the
* cache. Once the cache has reached its maximum size, dbufs are removed
* and destroyed. The eviction thread will continue running until the size
* of the dbuf cache is at or below the maximum size. Once the dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
mutex_enter(&dbuf_evict_lock);
while (!dbuf_evict_thread_exit) {
while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
&dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
/*
* Keep evicting as long as we're above the low water mark
* for the cache. We do this without holding the locks to
* minimize lock contention.
*/
while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
dbuf_evict_one();
}
mutex_enter(&dbuf_evict_lock);
}
dbuf_evict_thread_exit = B_FALSE;
cv_broadcast(&dbuf_evict_cv);
CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
thread_exit();
}
/*
* Wake up the dbuf eviction thread if the dbuf cache is at its max size.
* If the dbuf cache is at its high water mark, then evict a dbuf from the
* dbuf cache using the caller's context.
*/
static void
dbuf_evict_notify(uint64_t size)
{
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
* and grabbing the lock results in massive lock contention.
*/
if (size > dbuf_cache_target_bytes()) {
if (size > dbuf_cache_hiwater_bytes())
dbuf_evict_one();
cv_signal(&dbuf_evict_cv);
}
}
static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
ds->cache_count.value.ui64 =
wmsum_value(&dbuf_sums.cache_count);
ds->cache_size_bytes.value.ui64 =
zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
ds->cache_total_evicts.value.ui64 =
wmsum_value(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
ds->cache_levels[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels[i]);
ds->cache_levels_bytes[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
}
ds->hash_hits.value.ui64 =
wmsum_value(&dbuf_sums.hash_hits);
ds->hash_misses.value.ui64 =
wmsum_value(&dbuf_sums.hash_misses);
ds->hash_collisions.value.ui64 =
wmsum_value(&dbuf_sums.hash_collisions);
ds->hash_chains.value.ui64 =
wmsum_value(&dbuf_sums.hash_chains);
ds->hash_insert_race.value.ui64 =
wmsum_value(&dbuf_sums.hash_insert_race);
ds->metadata_cache_count.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_count);
ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
ds->metadata_cache_overflow.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_overflow);
return (0);
}
void
dbuf_init(void)
{
uint64_t hsize = 1ULL << 16;
dbuf_hash_table_t *h = &dbuf_hash_table;
int i;
/*
* The hash table is big enough to fill one eighth of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
hsize <<= 1;
retry:
h->hash_table_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
if (h->hash_table == NULL) {
/* XXX - we should really return an error instead of assert */
ASSERT(hsize > (1ULL << 10));
hsize >>= 1;
goto retry;
}
dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
sizeof (dmu_buf_impl_t),
0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < DBUF_RWLOCKS; i++)
rw_init(&h->hash_rwlocks[i], NULL, RW_DEFAULT, NULL);
dbuf_stats_init(h);
/*
* All entries are queued via taskq_dispatch_ent(), so min/maxalloc
* configuration is not required.
*/
dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
multilist_create(&dbuf_caches[dcs].cache,
sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_cache_link),
dbuf_cache_multilist_index_func);
zfs_refcount_create(&dbuf_caches[dcs].size);
}
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
wmsum_init(&dbuf_sums.cache_count, 0);
wmsum_init(&dbuf_sums.cache_total_evicts, 0);
for (i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_init(&dbuf_sums.cache_levels[i], 0);
wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
}
wmsum_init(&dbuf_sums.hash_hits, 0);
wmsum_init(&dbuf_sums.hash_misses, 0);
wmsum_init(&dbuf_sums.hash_collisions, 0);
wmsum_init(&dbuf_sums.hash_chains, 0);
wmsum_init(&dbuf_sums.hash_insert_race, 0);
wmsum_init(&dbuf_sums.metadata_cache_count, 0);
wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dbuf_ksp != NULL) {
for (i = 0; i < DN_MAX_LEVELS; i++) {
snprintf(dbuf_stats.cache_levels[i].name,
KSTAT_STRLEN, "cache_level_%d", i);
dbuf_stats.cache_levels[i].data_type =
KSTAT_DATA_UINT64;
snprintf(dbuf_stats.cache_levels_bytes[i].name,
KSTAT_STRLEN, "cache_level_%d_bytes", i);
dbuf_stats.cache_levels_bytes[i].data_type =
KSTAT_DATA_UINT64;
}
dbuf_ksp->ks_data = &dbuf_stats;
dbuf_ksp->ks_update = dbuf_kstat_update;
kstat_install(dbuf_ksp);
}
}
void
dbuf_fini(void)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
int i;
dbuf_stats_destroy();
for (i = 0; i < DBUF_RWLOCKS; i++)
rw_destroy(&h->hash_rwlocks[i]);
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
kmem_cache_destroy(dbuf_kmem_cache);
taskq_destroy(dbu_evict_taskq);
mutex_enter(&dbuf_evict_lock);
dbuf_evict_thread_exit = B_TRUE;
while (dbuf_evict_thread_exit) {
cv_signal(&dbuf_evict_cv);
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
zfs_refcount_destroy(&dbuf_caches[dcs].size);
multilist_destroy(&dbuf_caches[dcs].cache);
}
if (dbuf_ksp != NULL) {
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
wmsum_fini(&dbuf_sums.cache_count);
wmsum_fini(&dbuf_sums.cache_total_evicts);
for (i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_fini(&dbuf_sums.cache_levels[i]);
wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
}
wmsum_fini(&dbuf_sums.hash_hits);
wmsum_fini(&dbuf_sums.hash_misses);
wmsum_fini(&dbuf_sums.hash_collisions);
wmsum_fini(&dbuf_sums.hash_chains);
wmsum_fini(&dbuf_sums.hash_insert_race);
wmsum_fini(&dbuf_sums.metadata_cache_count);
wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}
/*
* Other stuff.
*/
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
dnode_t *dn;
dbuf_dirty_record_t *dr;
uint32_t txg_prev;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
return;
ASSERT(db->db_objset != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
ASSERT(db->db_parent == NULL);
ASSERT(db->db_blkptr == NULL);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
ASSERT3U(db->db_level, <, dn->dn_nlevels);
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID ||
!avl_is_empty(&dn->dn_dbufs));
}
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dn != NULL);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
} else if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn != NULL);
ASSERT0(db->db.db_offset);
} else {
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
}
if ((dr = list_head(&db->db_dirty_records)) != NULL) {
ASSERT(dr->dr_dbuf == db);
txg_prev = dr->dr_txg;
for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
dr = list_next(&db->db_dirty_records, dr)) {
ASSERT(dr->dr_dbuf == db);
ASSERT(txg_prev > dr->dr_txg);
txg_prev = dr->dr_txg;
}
}
/*
* We can't assert that db_size matches dn_datablksz because it
* can be momentarily different when another thread is doing
* dnode_set_blksz().
*/
if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
dr = db->db_data_pending;
/*
* It should only be modified in syncing context, so
* make sure we only have one copy of the data.
*/
ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
}
/* verify db->db_blkptr */
if (db->db_blkptr) {
if (db->db_parent == dn->dn_dbuf) {
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
ASSERT(db->db_parent == NULL);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
} else {
/* db is pointed to by an indirect block */
int epb __maybe_unused = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
/*
* dnode_grow_indblksz() can make this fail if we don't
* have the parent's rwlock. XXX indblksz no longer
* grows. safe to do this now?
*/
if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
ASSERT3P(db->db_blkptr, ==,
((blkptr_t *)db->db_parent->db.db_data +
db->db_blkid % epb));
}
}
}
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
(db->db_buf == NULL || db->db_buf->b_data) &&
db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
db->db_state != DB_FILL && !dn->dn_free_txg) {
/*
 * If the blkptr isn't set but the buffer has nonzero data,
* it had better be dirty, otherwise we'll lose that
* data when we evict this buffer.
*
* There is an exception to this rule for indirect blocks; in
* this case, if the indirect block is a hole, we fill in a few
* fields on each of the child blocks (importantly, birth time)
* to prevent hole birth times from being lost when you
* partially fill in a hole.
*/
if (db->db_dirtycnt == 0) {
if (db->db_level == 0) {
uint64_t *buf = db->db.db_data;
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
ASSERT(buf[i] == 0);
}
} else {
blkptr_t *bps = db->db.db_data;
ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
db->db.db_size);
/*
* We want to verify that all the blkptrs in the
* indirect block are holes, but we may have
* automatically set up a few fields for them.
* We iterate through each blkptr and verify
* they only have those fields set.
*/
for (int i = 0;
i < db->db.db_size / sizeof (blkptr_t);
i++) {
blkptr_t *bp = &bps[i];
ASSERT(ZIO_CHECKSUM_IS_ZERO(
&bp->blk_cksum));
ASSERT(
DVA_IS_EMPTY(&bp->blk_dva[0]) &&
DVA_IS_EMPTY(&bp->blk_dva[1]) &&
DVA_IS_EMPTY(&bp->blk_dva[2]));
ASSERT0(bp->blk_fill);
ASSERT0(bp->blk_pad[0]);
ASSERT0(bp->blk_pad[1]);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(BP_IS_HOLE(bp));
ASSERT0(bp->blk_phys_birth);
}
}
}
}
DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
ASSERT3P(db->db_buf, ==, NULL);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "clear data");
}
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(buf != NULL);
db->db_buf = buf;
ASSERT(buf->b_data != NULL);
db->db.db_data = buf->b_data;
}
static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
spa_t *spa = db->db_objset->os_spa;
return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
/*
* Loan out an arc_buf for read. Return the loaned arc_buf.
*/
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
arc_buf_t *abuf;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
int blksz = db->db.db_size;
spa_t *spa = db->db_objset->os_spa;
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
memcpy(abuf->b_data, db->db.db_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
mutex_exit(&db->db_mtx);
}
return (abuf);
}
/*
* Calculate which level n block references the data at the level 0 offset
* provided.
*/
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
/*
* The level n blkid is equal to the level 0 blkid divided by
* the number of level 0s in a level n block.
*
* The level 0 blkid is offset >> datablkshift =
* offset / 2^datablkshift.
*
* The number of level 0s in a level n is the number of block
* pointers in an indirect block, raised to the power of level.
* This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
* 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
*
* Thus, the level n blkid is: offset /
* ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
* = offset / 2^(datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
* = offset >> (datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
*/
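/*
 * Worked example (illustrative values only, assuming the usual 128-byte
 * blkptr_t, i.e. SPA_BLKPTRSHIFT == 7): with 128K data blocks
 * (datablkshift = 17) and 128K indirect blocks (indblkshift = 17, so
 * epbs = 10), the level 1 blkid covering byte offset 2^28 is
 * 2^28 >> (17 + 1 * 10) = 2.
 */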
const unsigned exp = dn->dn_datablkshift +
level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
if (exp >= 8 * sizeof (offset)) {
/* This only happens on the highest indirection level */
ASSERT3U(level, ==, dn->dn_nlevels - 1);
return (0);
}
ASSERT3U(exp, <, 8 * sizeof (offset));
return (offset >> exp);
} else {
ASSERT3U(offset, <, dn->dn_datablksz);
return (0);
}
}
/*
* This function is used to lock the parent of the provided dbuf. This should be
* used when modifying or reading db_blkptr.
*/
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
enum db_lock_type ret = DLT_NONE;
if (db->db_parent != NULL) {
rw_enter(&db->db_parent->db_rwlock, rw);
ret = DLT_PARENT;
} else if (dmu_objset_ds(db->db_objset) != NULL) {
rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
tag);
ret = DLT_OBJSET;
}
/*
* We only return a DLT_NONE lock when it's the top-most indirect block
* of the meta-dnode of the MOS.
*/
return (ret);
}
/*
* We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus have no
 * parent) to no longer being the topmost after an indirection increase. This
 * would cause a panic if we didn't pass the lock type in.
*/
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
if (type == DLT_PARENT)
rw_exit(&db->db_parent->db_rwlock);
else if (type == DLT_OBJSET)
rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
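/*
 * Typical usage sketch (illustrative, mirroring the callers further down
 * in this file): take the parent lock before dereferencing db_blkptr and
 * hand the returned lock type back when releasing it:
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	blkptr_t bp = *db->db_blkptr;
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */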
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
(void) zb, (void) bp;
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
ASSERT3U(db->db_state, ==, DB_READ);
/*
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT3P(db->db_buf, ==, NULL);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
memset(buf->b_data, 0, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "freed in flight");
} else {
/* success */
ASSERT(zio == NULL || zio->io_error == 0);
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "successful read");
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL, B_FALSE);
}
/*
* Shortcut for performing reads on bonus dbufs. Returns
* an error if we fail to verify the dnode associated with
* a decrypted block. Otherwise success.
*/
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
int bonuslen, max_bonuslen, err;
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err)
return (err);
bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(DB_DNODE_HELD(db));
ASSERT3U(bonuslen, <=, db->db.db_size);
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
memset(db->db.db_data, 0, max_bonuslen);
if (bonuslen)
memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
}
static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
blkptr_t *bps = db->db.db_data;
uint32_t indbs = 1ULL << dn->dn_indblkshift;
int n_bps = indbs >> SPA_BLKPTRSHIFT;
for (int i = 0; i < n_bps; i++) {
blkptr_t *bp = &bps[i];
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
}
}
/*
* Handle reads on dbufs that are holes, if necessary. This function
* requires that the dbuf's mutex is held. Returns success (0) if action
* was taken, ENOENT if no action was taken.
*/
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
/*
* For level 0 blocks only, if the above check fails:
* Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
* processes the delete record and clears the bp while we are waiting
* for the dn_mtx (resulting in a "no" from block_freed).
*/
if (!is_hole && db->db_level == 0) {
is_hole = dnode_block_freed(dn, db->db_blkid) ||
BP_IS_HOLE(db->db_blkptr);
}
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
memset(db->db.db_data, 0, db->db.db_size);
if (db->db_blkptr != NULL && db->db_level > 0 &&
BP_IS_HOLE(db->db_blkptr) &&
db->db_blkptr->blk_birth != 0) {
dbuf_handle_indirect_hole(db, dn);
}
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "hole read satisfied");
return (0);
}
return (ENOENT);
}
/*
* This function ensures that, when doing a decrypting read of a block,
* we make sure we have decrypted the dnode associated with it. We must do
* this so that we ensure we are fully authenticating the checksum-of-MACs
* tree from the root of the objset down to this block. Indirect blocks are
* always verified against their secure checksum-of-MACs assuming that the
* dnode containing them is correct. Now that we are doing a decrypting read,
* we can be sure that the key is loaded and verify that assumption. This is
* especially important considering that we always read encrypted dnode
* blocks as raw data (without verifying their MACs) to start, and
* decrypt / authenticate them when we need to read an encrypted bonus buffer.
*/
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
int err = 0;
objset_t *os = db->db_objset;
arc_buf_t *dnode_abuf;
dnode_t *dn;
zbookmark_phys_t zb;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!os->os_encrypted || os->os_raw_receive ||
(flags & DB_RF_NO_DECRYPT) != 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
DB_DNODE_EXIT(db);
return (0);
}
SET_BOOKMARK(&zb, dmu_objset_id(os),
DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
/*
* An error code of EACCES tells us that the key is still not
* available. This is ok if we are only reading authenticated
* (and therefore non-encrypted) blocks.
*/
if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
(db->db_blkid == DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
err = 0;
DB_DNODE_EXIT(db);
return (err);
}
/*
* Drops db_mtx and the parent lock specified by dblt and tag before
* returning.
*/
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
db_lock_type_t dblt, const void *tag)
{
dnode_t *dn;
zbookmark_phys_t zb;
uint32_t aflags = ARC_FLAG_NOWAIT;
int err, zio_flags;
err = zio_flags = 0;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED);
ASSERT(db->db_buf == NULL);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
if (db->db_blkid == DMU_BONUS_BLKID) {
err = dbuf_read_bonus(db, dn, flags);
goto early_unlock;
}
err = dbuf_read_hole(db, dn);
if (err == 0)
goto early_unlock;
/*
* Any attempt to read a redacted block should result in an error. This
* will never happen under normal conditions, but can be useful for
* debugging purposes.
*/
if (BP_IS_REDACTED(db->db_blkptr)) {
ASSERT(dsl_dataset_feature_is_active(
db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
err = SET_ERROR(EIO);
goto early_unlock;
}
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
spa_log_error(db->db_objset->os_spa, &zb);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(db->db_objset));
err = SET_ERROR(EIO);
goto early_unlock;
}
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err != 0)
goto early_unlock;
DB_DNODE_EXIT(db);
db->db_state = DB_READ;
DTRACE_SET_STATE(db, "read issued");
mutex_exit(&db->db_mtx);
if (dbuf_is_l2cacheable(db))
aflags |= ARC_FLAG_L2CACHE;
dbuf_add_ref(db, NULL);
zio_flags = (flags & DB_RF_CANFAIL) ?
ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
zio_flags |= ZIO_FLAG_RAW;
/*
* The zio layer will copy the provided blkptr later, but we need to
* do this now so that we can release the parent's rwlock. We have to
* do that now so that if dbuf_read_done is called synchronously (on
* an l1 cache hit) we don't acquire the db_mtx while holding the
* parent's rwlock, which would be a lock ordering violation.
*/
blkptr_t bp = *db->db_blkptr;
dmu_buf_unlock_parent(db, dblt, tag);
(void) arc_read(zio, db->db_objset->os_spa, &bp,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
&aflags, &zb);
return (err);
early_unlock:
DB_DNODE_EXIT(db);
mutex_exit(&db->db_mtx);
dmu_buf_unlock_parent(db, dblt, tag);
return (err);
}
/*
* This is our just-in-time copy function. It makes a copy of buffers that
* have been modified in a previous transaction group before we access them in
* the current active group.
*
* This function is used in three places: when we are dirtying a buffer for the
* first time in a txg, when we are freeing a range in a dnode that includes
* this buffer, and when we are accessing a buffer which was received compressed
* and later referenced in a WRITE_BYREF record.
*
* Note that when we are called from dbuf_free_range() we do not put a hold on
* the buffer, we just traverse the active dbuf list for the dnode.
*/
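/*
 * Illustrative scenario (txg numbers assumed): if txg 100 dirtied this
 * dbuf and has not yet finished syncing when txg 101 dirties it again,
 * the txg 100 dirty record is redirected to a private copy of the data
 * below, so the open-context modification cannot leak into the data
 * that txg 100 writes out.
 */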
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
ASSERT(db->db_level == 0);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
(dr->dt.dl.dr_data !=
((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
return;
/*
* If the last dirty record for this dbuf has not yet synced
 * and it's referencing the dbuf data, either:
 *	reset the reference to point to a new copy,
 * or (if there are no active holders)
* just null out the current db_data pointer.
*/
ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
enum zio_compress compress_type =
arc_get_compression(db->db_buf);
uint8_t complevel = arc_get_complevel(db->db_buf);
if (arc_is_encrypted(db->db_buf)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(db->db_buf, &byteorder, salt,
iv, mac);
dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
compress_type, complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
size, arc_buf_lsize(db->db_buf), compress_type,
complevel);
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
}
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
int err = 0;
boolean_t prefetch;
dnode_t *dn;
/*
* We don't have to hold the mutex to check db_state because it
* can't be freed while we have a hold on the buffer.
*/
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
if (db->db_state == DB_NOFILL)
return (SET_ERROR(EIO));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
(flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
DBUF_IS_CACHEABLE(db);
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED) {
spa_t *spa = dn->dn_objset->os_spa;
/*
* Ensure that this block's dnode has been decrypted if
* the caller has requested decrypted data.
*/
err = dbuf_read_verify_dnode_crypt(db, flags);
/*
* If the arc buf is compressed or encrypted and the caller
* requested uncompressed data, we need to untransform it
* before returning. We also call arc_untransform() on any
* unauthenticated blocks, which will verify their MAC if
* the key is now available.
*/
if (err == 0 && db->db_buf != NULL &&
(flags & DB_RF_NO_DECRYPT) == 0 &&
(arc_is_encrypted(db->db_buf) ||
arc_is_unauthenticated(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
mutex_exit(&db->db_mtx);
if (err == 0 && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_FALSE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_hits);
} else if (db->db_state == DB_UNCACHED) {
spa_t *spa = dn->dn_objset->os_spa;
boolean_t need_wait = B_FALSE;
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
if (zio == NULL &&
db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
need_wait = B_TRUE;
}
err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
/*
* dbuf_read_impl has dropped db_mtx and our parent's rwlock
* for us
*/
if (!err && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
db->db_state != DB_CACHED,
flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/*
* If we created a zio_root we must execute it to avoid
* leaking it, even if it isn't attached to any work due
* to an error in dbuf_read_impl().
*/
if (need_wait) {
if (err == 0)
err = zio_wait(zio);
else
VERIFY0(zio_wait(zio));
}
} else {
/*
* Another reader came in while the dbuf was in flight
* between UNCACHED and CACHED. Either a writer will finish
* writing the buffer (sending the dbuf to CACHED) or the
* first reader's request will reach the read_done callback
* and send the dbuf to CACHED. Otherwise, a failure
* occurred and the dbuf went to UNCACHED.
*/
mutex_exit(&db->db_mtx);
if (prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_TRUE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/* Skip the wait per the caller's request. */
if ((flags & DB_RF_NEVERWAIT) == 0) {
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL) {
ASSERT(db->db_state == DB_READ ||
(flags & DB_RF_HAVESTRUCT) == 0);
DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
db, zio_t *, zio);
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
}
}
return (err);
}
static void
dbuf_noread(dmu_buf_impl_t *db)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
} else if (db->db_state == DB_NOFILL) {
dbuf_clear_data(db);
} else {
ASSERT3U(db->db_state, ==, DB_CACHED);
}
mutex_exit(&db->db_mtx);
}
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
uint64_t txg = dr->dr_txg;
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* This assert is valid because dmu_sync() expects to be called by
* a zilog's get_data while holding a range lock. This call only
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
ASSERT(db->db_level == 0);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
return;
ASSERT(db->db_data_pending != dr);
/* free this block */
if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
zio_free(db->db_objset->os_spa, txg, bp);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
dr->dt.dl.dr_nopwrite = B_FALSE;
dr->dt.dl.dr_has_raw_params = B_FALSE;
/*
* Release the already-written buffer, so we leave it in
* a consistent dirty state. Note that all callers are
* modifying the buffer, so they will immediately do
* another (redundant) arc_release(). Therefore, leave
* the buf thawed to save the effort of freezing &
* immediately re-thawing it.
*/
arc_release(dr->dt.dl.dr_data, db);
}
/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
* data blocks in the free range, so that any future readers will find
* empty blocks.
*/
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db, *db_next;
uint64_t txg = tx->tx_txg;
avl_index_t where;
dbuf_dirty_record_t *dr;
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
(u_longlong_t)end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
db_search->db_blkid = start_blkid;
db_search->db_state = DB_SEARCH;
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
ASSERT3P(db, ==, NULL);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = db_next) {
db_next = AVL_NEXT(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
if (db->db_level != 0 || db->db_blkid > end_blkid) {
break;
}
ASSERT3U(db->db_blkid, >=, start_blkid);
/* found a level 0 buffer in the range */
mutex_enter(&db->db_mtx);
if (dbuf_undirty(db, tx)) {
/* mutex has been dropped and dbuf destroyed */
continue;
}
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
ASSERT(db->db.db_data == NULL);
mutex_exit(&db->db_mtx);
continue;
}
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/* will be handled in dbuf_read_done or dbuf_rele */
db->db_freed_in_flight = TRUE;
mutex_exit(&db->db_mtx);
continue;
}
if (zfs_refcount_count(&db->db_holds) == 0) {
ASSERT(db->db_buf);
dbuf_destroy(db);
continue;
}
/* The dbuf is referenced */
dr = list_head(&db->db_dirty_records);
if (dr != NULL) {
if (dr->dr_txg == txg) {
/*
* This buffer is "in-use", re-adjust the file
* size to reflect that this buffer may
* contain new data when we sync.
*/
if (db->db_blkid != DMU_SPILL_BLKID &&
db->db_blkid > dn->dn_maxblkid)
dn->dn_maxblkid = db->db_blkid;
dbuf_unoverride(dr);
} else {
/*
* This dbuf is not dirty in the open context.
 * Either uncache it (if it's not referenced in
* the open context) or reset its contents to
* empty.
*/
dbuf_fix_old_data(db, txg);
}
}
/* clear the contents if it's cached */
if (db->db_state == DB_CACHED) {
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
mutex_exit(&db->db_mtx);
}
mutex_exit(&dn->dn_dbufs_mtx);
kmem_free(db_search, sizeof (dmu_buf_impl_t));
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
arc_buf_t *buf, *old_buf;
dbuf_dirty_record_t *dr;
int osize = db->db.db_size;
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dnode_t *dn;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* XXX we should be doing a dbuf_read, checking the return
* value and returning that up to our callers
*/
dmu_buf_will_dirty(&db->db, tx);
/* create the data buffer for the new block */
buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
/* copy old block data to the new block */
old_buf = db->db_buf;
memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
memset((uint8_t *)buf->b_data + osize, 0, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
arc_buf_destroy(old_buf, db);
db->db.db_size = size;
dr = list_head(&db->db_dirty_records);
/* dirty record added by dmu_buf_will_dirty() */
VERIFY(dr != NULL);
if (db->db_level == 0)
dr->dt.dl.dr_data = buf;
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
ASSERT3U(dr->dr_accounted, ==, osize);
dr->dr_accounted = size;
mutex_exit(&db->db_mtx);
dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
objset_t *os __maybe_unused = db->db_objset;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(arc_released(os->os_phys_buf) ||
list_link_active(&os->os_dsl_dataset->ds_synced_link));
ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
(void) arc_release(db->db_buf, db);
}
/*
* We already have a dirty record for this TXG, and we are being
* dirtied again.
*/
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
/*
* If this buffer has already been written out,
* we now need to reset its state.
*/
dbuf_unoverride(dr);
if (db->db.db_object != DMU_META_DNODE_OBJECT &&
db->db_state != DB_NOFILL) {
/* Already released on initial dirty, so just thaw. */
ASSERT(arc_released(db->db_buf));
arc_buf_thaw(db->db_buf);
}
}
}
dbuf_dirty_record_t *
dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
ASSERT(dn->dn_maxblkid >= blkid);
dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
dr->dr_txg = tx->tx_txg;
dr->dt.dll.dr_blkid = blkid;
dr->dr_accounted = dn->dn_datablksz;
/*
* There should not be any dbuf for the block that we're dirtying.
* Otherwise the buffer contents could be inconsistent between the
* dbuf and the lightweight dirty record.
*/
ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid));
mutex_enter(&dn->dn_mtx);
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
}
if (dn->dn_nlevels == 1) {
ASSERT3U(blkid, <, dn->dn_nblkptr);
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
dnode_setdirty(dn, tx);
} else {
mutex_exit(&dn->dn_mtx);
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
1, blkid >> epbs, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (parent_db == NULL) {
kmem_free(dr, sizeof (*dr));
return (NULL);
}
int err = dbuf_read(parent_db, NULL,
(DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err != 0) {
dbuf_rele(parent_db, FTAG);
kmem_free(dr, sizeof (*dr));
return (NULL);
}
dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
dbuf_rele(parent_db, FTAG);
mutex_enter(&parent_dr->dt.di.dr_mtx);
ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
list_insert_tail(&parent_dr->dt.di.dr_children, dr);
mutex_exit(&parent_dr->dt.di.dr_mtx);
dr->dr_parent = parent_dr;
}
dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
return (dr);
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
objset_t *os;
dbuf_dirty_record_t *dr, *dr_next, *dr_head;
int txgoff = tx->tx_txg & TXG_MASK;
boolean_t drop_struct_rwlock = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DMU_TX_DIRTY_BUF(tx, db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Shouldn't dirty a regular buffer in syncing context. Private
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
ASSERT(!dmu_tx_is_syncing(tx) ||
BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
dn->dn_objset->os_dsl_dataset == NULL);
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
/*
* We make this assert for private objects as well, but after we
* check if we're already dirty. They are allowed to re-dirty
* in syncing context.
*/
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
* XXX make this true for indirects too? The problem is that
* transactions created with dmu_tx_create_assigned() from
* syncing context don't bother holding ahead.
*/
ASSERT(db->db_level != 0 ||
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
mutex_enter(&dn->dn_mtx);
dnode_set_dirtyctx(dn, tx, db);
if (tx->tx_txg > dn->dn_dirty_txg)
dn->dn_dirty_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
/*
* If this buffer is already dirty, we're done.
*/
dr_head = list_head(&db->db_dirty_records);
ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
db->db.db_object == DMU_META_DNODE_OBJECT);
dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
if (dr_next && dr_next->dr_txg == tx->tx_txg) {
DB_DNODE_EXIT(db);
dbuf_redirty(dr_next);
mutex_exit(&db->db_mtx);
return (dr_next);
}
/*
* Only valid if not already dirty.
*/
ASSERT(dn->dn_object == 0 ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
* We should only be dirtying in syncing context if it's the
* mos or we're initializing the os or it's a special object.
* However, we are allowed to dirty in syncing context provided
* we already dirtied it in open context. Hence we must make
* this assertion only if we're not already dirty.
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
ASSERT(db->db.db_size != 0);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
if (db->db_blkid != DMU_BONUS_BLKID) {
dmu_objset_willuse_space(os, db->db.db_size, tx);
}
/*
* If this buffer is dirty in an old transaction group we need
* to make a copy of it so that the changes we make in this
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
if (db->db_level == 0) {
void *data_old = db->db_buf;
if (db->db_state != DB_NOFILL) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db.db_data;
} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
/*
* Release the data buffer from the cache so
* that we can modify it without impacting
* possible other users of this cached data
* block. Note that indirect blocks and
* private objects are not released until the
* syncing state (since they are only modified
* then).
*/
arc_release(db->db_buf, db);
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db_buf;
}
ASSERT(data_old != NULL);
}
dr->dt.dl.dr_data = data_old;
} else {
mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
list_create(&dr->dt.di.dr_children,
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
if (db->db_blkid != DMU_BONUS_BLKID)
dr->dr_accounted = db->db.db_size;
dr->dr_dbuf = db;
dr->dr_txg = tx->tx_txg;
list_insert_before(&db->db_dirty_records, dr_next, dr);
/*
* We could have been freed_in_flight between the dbuf_noread
* and dbuf_dirty. We win, as though the dbuf_noread() had
* happened after the free.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff],
db->db_blkid, 1);
}
mutex_exit(&dn->dn_mtx);
db->db_freed_in_flight = FALSE;
}
/*
* This buffer is now part of this txg
*/
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
db->db_dirtycnt += 1;
ASSERT3U(db->db_dirtycnt, <=, 3);
mutex_exit(&db->db_mtx);
if (db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_rwlock = B_TRUE;
}
/*
* If we are overwriting a dedup BP, then unless it is snapshotted,
* when we get to syncing context we will need to decrement its
* refcount in the DDT. Prefetch the relevant DDT block so that
* syncing context won't have to wait for the i/o.
*/
if (db->db_blkptr != NULL) {
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
ddt_prefetch(os->os_spa, db->db_blkptr);
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* We need to hold the dn_struct_rwlock to make this assertion,
* because it protects dn_phys / dn_next_nlevels from changing.
*/
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
dn->dn_phys->dn_nlevels > db->db_level ||
dn->dn_next_nlevels[txgoff] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
if (db->db_level == 0) {
ASSERT(!db->db_objset->os_raw_receive ||
dn->dn_maxblkid >= db->db_blkid);
dnode_new_blkid(dn, db->db_blkid, tx,
drop_struct_rwlock, B_FALSE);
ASSERT(dn->dn_maxblkid >= db->db_blkid);
}
if (db->db_level+1 < dn->dn_nlevels) {
dmu_buf_impl_t *parent = db->db_parent;
dbuf_dirty_record_t *di;
int parent_held = FALSE;
if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, FTAG);
ASSERT(parent != NULL);
parent_held = TRUE;
}
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
ASSERT3U(db->db_level + 1, ==, parent->db_level);
di = dbuf_dirty(parent, tx);
if (parent_held)
dbuf_rele(parent, FTAG);
mutex_enter(&db->db_mtx);
/*
* Since we've dropped the mutex, it's possible that
* dbuf_undirty() might have changed this out from under us.
*/
if (list_head(&db->db_dirty_records) == dr ||
dn->dn_object == DMU_META_DNODE_OBJECT) {
mutex_enter(&di->dt.di.dr_mtx);
ASSERT3U(di->dr_txg, ==, tx->tx_txg);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&di->dt.di.dr_children, dr);
mutex_exit(&di->dt.di.dr_mtx);
dr->dr_parent = di;
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_level + 1 == dn->dn_nlevels);
ASSERT(db->db_blkid < dn->dn_nblkptr);
ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
}
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
static void
dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
if (dr->dt.dl.dr_data != db->db.db_data) {
struct dnode *dn = dr->dr_dnode;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
kmem_free(dr->dt.dl.dr_data, max_bonuslen);
arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
}
db->db_data_pending = NULL;
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
if (dr->dr_dbuf->db_level != 0) {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT3U(db->db_dirtycnt, >, 0);
db->db_dirtycnt -= 1;
}
/*
* Undirty a buffer in the transaction group referenced by the given
* transaction. Return whether this evicted the dbuf.
*/
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
uint64_t txg = tx->tx_txg;
ASSERT(txg != 0);
/*
* Due to our use of dn_nlevels below, this can only be called
* in open context, unless we are operating on the MOS.
* From syncing context, dn_nlevels may be different from the
* dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* If this buffer is not dirty, we're done.
*/
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL)
return (B_FALSE);
ASSERT(dr->dr_dbuf == db);
dnode_t *dn = dr->dr_dnode;
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
ASSERT(db->db.db_size != 0);
dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
dr->dr_accounted, txg);
list_remove(&db->db_dirty_records, dr);
/*
* Note that there are three places in dbuf_dirty()
* where this dirty record may be put on a list.
* Make sure to do a list_remove corresponding to
* every one of those list_insert calls.
*/
if (dr->dr_parent) {
mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
list_remove(&dr->dr_parent->dt.di.dr_children, dr);
mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
} else if (db->db_blkid == DMU_SPILL_BLKID ||
db->db_level + 1 == dn->dn_nlevels) {
ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
mutex_exit(&dn->dn_mtx);
}
if (db->db_state != DB_NOFILL) {
dbuf_unoverride(dr);
ASSERT(db->db_buf != NULL);
ASSERT(dr->dt.dl.dr_data != NULL);
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
dbuf_destroy(db);
return (B_TRUE);
}
return (B_FALSE);
}
static void
dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
/*
* Quick check for dirtiness. For already dirty blocks, this
* reduces runtime of this function by >90%, and overall performance
* by 50% for some workloads (e.g. file deletion with indirect blocks
* cached).
*/
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
/*
* It's possible that it is already dirty but not cached,
* because there are some calls to dbuf_dirty() that don't
* go through dmu_buf_will_dirty().
*/
if (dr != NULL) {
/* This dbuf is already dirty and cached. */
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return;
}
}
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
flags |= DB_RF_HAVESTRUCT;
DB_DNODE_EXIT(db);
(void) dbuf_read(db, NULL, flags);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
}
boolean_t
dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
mutex_enter(&db->db_mtx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
mutex_exit(&db->db_mtx);
return (dr != NULL);
}
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db, "allocating NOFILL buffer");
dmu_buf_will_fill(db_fake, tx);
}
void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
ASSERT(db->db_level == 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
dmu_tx_private_ok(tx));
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
/*
* This function is effectively the same as dmu_buf_will_dirty(), but
* indicates the caller expects raw encrypted data in the db, and provides
* the crypt params (byteorder, salt, iv, mac) which should be stored in the
* blkptr_t when this dbuf is written. This is only used for blocks of
* dnodes, during raw receive.
*/
void
dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
/*
* dr_has_raw_params is only processed for blocks of dnodes
* (see dbuf_sync_dnode_leaf_crypt()).
*/
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
ASSERT(db->db_objset->os_raw_receive);
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
ASSERT3P(dr, !=, NULL);
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
}
static void
dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
{
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
dr = list_head(&db->db_dirty_records);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
dl->dr_overridden_by = *bp;
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
void
dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
(void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dbuf_states_t old_state;
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
old_state = db->db_state;
db->db_state = DB_CACHED;
if (old_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
} else {
DTRACE_SET_STATE(db, "fill done");
}
cv_broadcast(&db->db_changed);
}
mutex_exit(&db->db_mtx);
}
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
struct dirty_leaf *dl;
dmu_object_type_t type;
dbuf_dirty_record_t *dr;
if (etype == BP_EMBEDDED_TYPE_DATA) {
ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
SPA_FEATURE_EMBEDDED_DATA));
}
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
dmu_buf_will_not_fill(dbuf, tx);
dr = list_head(&db->db_dirty_records);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
encode_embedded_bp_compressed(&dl->dr_overridden_by,
data, comp, uncompressed_size, compressed_size);
BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
BP_SET_TYPE(&dl->dr_overridden_by, type);
BP_SET_LEVEL(&dl->dr_overridden_by, 0);
BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
void
dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dmu_object_type_t type;
ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
dmu_buf_will_not_fill(dbuf, tx);
blkptr_t bp = { { { {0} } } };
BP_SET_TYPE(&bp, type);
BP_SET_LEVEL(&bp, 0);
BP_SET_BIRTH(&bp, tx->tx_txg, 0);
BP_SET_REDACTED(&bp);
BPE_SET_LSIZE(&bp, dbuf->db_size);
dbuf_override_impl(db, &bp, tx);
}
/*
* Directly assign a provided arc buf to a given dbuf if it's not referenced
* by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
*/
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_level == 0);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
ASSERT(tx->tx_txg != 0);
arc_return_buf(buf, db);
ASSERT(arc_released(buf));
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
if (db->db_state == DB_CACHED &&
zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
/*
* In practice, we will never have a case where we have an
* encrypted arc buffer while additional holds exist on the
* dbuf. We don't handle this here so we simply assert that
* fact instead.
*/
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
memcpy(db->db.db_data, buf->b_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
arc_release(db->db_buf, db);
}
dr->dt.dl.dr_data = buf;
arc_buf_destroy(db->db_buf, db);
} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
arc_release(db->db_buf, db);
arc_buf_destroy(db->db_buf, db);
}
db->db_buf = NULL;
}
ASSERT(db->db_buf == NULL);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
dmu_buf_fill_done(&db->db, tx);
}
void
dbuf_destroy(dmu_buf_impl_t *db)
{
dnode_t *dn;
dmu_buf_impl_t *parent = db->db_parent;
dmu_buf_impl_t *dndb;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(zfs_refcount_is_zero(&db->db_holds));
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
if (db->db_blkid == DMU_BONUS_BLKID) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
if (db->db.db_data != NULL) {
kmem_free(db->db.db_data, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "buffer cleared");
}
}
dbuf_clear_data(db);
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_data_pending == NULL);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
DTRACE_SET_STATE(db, "buffer eviction started");
db->db_blkptr = NULL;
/*
* Now that db_state is DB_EVICTING, nobody else can find this via
* the hash table. We can now drop db_mtx, which allows us to
* acquire the dn_dbufs_mtx.
*/
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dndb = dn->dn_dbuf;
if (db->db_blkid != DMU_BONUS_BLKID) {
boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
if (needlock)
mutex_enter_nested(&dn->dn_dbufs_mtx,
NESTED_SINGLE);
avl_remove(&dn->dn_dbufs, db);
membar_producer();
DB_DNODE_EXIT(db);
if (needlock)
mutex_exit(&dn->dn_dbufs_mtx);
/*
* Decrementing the dbuf count means that the hold corresponding
* to the removed dbuf is no longer discounted in dnode_move(),
* so the dnode cannot be moved until after we release the hold.
* The membar_producer() ensures visibility of the decremented
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, db, B_TRUE);
db->db_dnode_handle = NULL;
dbuf_hash_remove(db);
} else {
DB_DNODE_EXIT(db);
}
ASSERT(zfs_refcount_is_zero(&db->db_holds));
db->db_parent = NULL;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
ASSERT(db->db_hash_next == NULL);
ASSERT(db->db_blkptr == NULL);
ASSERT(db->db_data_pending == NULL);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
/*
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb) {
mutex_enter(&parent->db_mtx);
dbuf_rele_and_unlock(parent, db, B_TRUE);
}
kmem_cache_free(dbuf_kmem_cache, db);
arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
}
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
* this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
*parentp = NULL;
*bpp = NULL;
ASSERT(blkid != DMU_BONUS_BLKID);
if (blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_have_spill &&
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
else
*bpp = NULL;
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
mutex_exit(&dn->dn_mtx);
return (0);
}
int nlevels =
(dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(level * epbs, <, 64);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
/*
* This assertion shouldn't trip as long as the max indirect block size
* is less than 1M. The reason for this is that up to that point,
* the number of levels required to address an entire object with blocks
* of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
* other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
* (i.e. we can address the entire object), objects will all use at most
* N-1 levels and the assertion won't overflow. However, once epbs is
* 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
* enough to address an entire object, so objects will have 5 levels,
* but then this assertion will overflow.
*
* All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
* need to redo this logic to handle overflows.
*/
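/*
 * Sanity check with today's limits (illustrative): the largest supported
 * indirect blocks (indblkshift = 17, SPA_BLKPTRSHIFT = 7) give epbs = 10,
 * and with dn_nblkptr = 3 (highbit64(3) = 2) the expression below stays
 * within 64 bits whenever (nlevels - level - 1) <= 6, since
 * 6 * 10 + 2 = 62 <= 64.
 */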
ASSERT(level >= nlevels ||
((nlevels - level - 1) * epbs) +
highbit64(dn->dn_phys->dn_nblkptr) <= 64);
if (level >= nlevels ||
blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
((nlevels - level - 1) * epbs)) ||
(fail_sparse &&
blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
err = dbuf_hold_impl(dn, level + 1,
blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
if (err)
return (err);
err = dbuf_read(*parentp, NULL,
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err) {
dbuf_rele(*parentp, NULL);
*parentp = NULL;
return (err);
}
rw_enter(&(*parentp)->db_rwlock, RW_READER);
*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
(blkid & ((1ULL << epbs) - 1));
if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
ASSERT(BP_IS_HOLE(*bpp));
rw_exit(&(*parentp)->db_rwlock);
return (0);
} else {
/* the block is referenced from the dnode */
ASSERT3U(level, ==, nlevels-1);
ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
blkid < dn->dn_phys->dn_nblkptr);
if (dn->dn_dbuf) {
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
}
*bpp = &dn->dn_phys->dn_blkptr[blkid];
return (0);
}
}
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
objset_t *os = dn->dn_objset;
dmu_buf_impl_t *db, *odb;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);
db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dbuf_node));
db->db_objset = os;
db->db.db_object = dn->dn_object;
db->db_level = level;
db->db_blkid = blkid;
db->db_dirtycnt = 0;
db->db_dnode_handle = dn->dn_handle;
db->db_parent = parent;
db->db_blkptr = blkptr;
db->db_user = NULL;
db->db_user_immediate_evict = FALSE;
db->db_freed_in_flight = FALSE;
db->db_pending_evict = FALSE;
if (blkid == DMU_BONUS_BLKID) {
ASSERT3P(parent, ==, dn->dn_dbuf);
db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
db->db.db_offset = DMU_BONUS_BLKID;
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "bonus buffer created");
db->db_caching_status = DB_NO_CACHE;
/* the bonus dbuf is not placed in the hash table */
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
return (db);
} else if (blkid == DMU_SPILL_BLKID) {
db->db.db_size = (blkptr != NULL) ?
BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
db->db.db_offset = 0;
} else {
int blocksize =
db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
db->db.db_size = blocksize;
db->db.db_offset = db->db_blkid * blocksize;
}
/*
* Hold the dn_dbufs_mtx while we add the new dbuf
* to the hash table *and* to the dn_dbufs list.
* This prevents a possible deadlock with someone
* trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
db->db_state = DB_EVICTING; /* not worth logging this state change */
if ((odb = dbuf_hash_insert(db)) != NULL) {
/* someone else inserted it first */
mutex_exit(&dn->dn_dbufs_mtx);
kmem_cache_free(dbuf_kmem_cache, db);
DBUF_STAT_BUMP(hash_insert_race);
return (odb);
}
avl_add(&dn->dn_dbufs, db);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "regular buffer created");
db->db_caching_status = DB_NO_CACHE;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
if (parent && parent != dn->dn_dbuf)
dbuf_add_ref(parent, db);
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
zfs_refcount_count(&dn->dn_holds) > 0);
(void) zfs_refcount_add(&dn->dn_holds, db);
dprintf_dbuf(db, "db=%p\n", db);
return (db);
}
/*
* This function returns a block pointer and information about the object,
* given a dnode and a block. This is a publicly accessible version of
* dbuf_findbp that only returns some information, rather than the
* dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
* should be locked as (at least) a reader.
*/
int
dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
{
dmu_buf_impl_t *dbp = NULL;
blkptr_t *bp2;
int err = 0;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
if (err == 0) {
*bp = *bp2;
if (dbp != NULL)
dbuf_rele(dbp, NULL);
if (datablkszsec != NULL)
*datablkszsec = dn->dn_phys->dn_datablkszsec;
if (indblkshift != NULL)
*indblkshift = dn->dn_phys->dn_indblkshift;
}
return (err);
}
typedef struct dbuf_prefetch_arg {
spa_t *dpa_spa; /* The spa to issue the prefetch in. */
zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
int dpa_curlevel; /* The current level that we're reading */
dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
void *dpa_arg; /* prefetch completion arg */
} dbuf_prefetch_arg_t;
static void
dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
{
if (dpa->dpa_cb != NULL) {
dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
dpa->dpa_zb.zb_blkid, io_done);
}
kmem_free(dpa, sizeof (*dpa));
}
static void
dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zio, (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
if (abuf != NULL)
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
}
/*
* Actually issue the prefetch read for the block given.
*/
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (dbuf_prefetch_fini(dpa, B_FALSE));
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags =
dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
ARC_FLAG_NO_BUF;
/* dnodes are always read as raw and then converted later */
if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
dpa->dpa_curlevel == 0)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
ASSERT(dpa->dpa_zio != NULL);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
dbuf_issue_final_prefetch_done, dpa,
dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
}
/*
* Called when an indirect block above our prefetch target is read in. This
* will either read in the next indirect block down the tree or issue the actual
* prefetch if the next block down is our target.
*/
static void
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
ASSERT3S(dpa->dpa_curlevel, >, 0);
if (abuf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
- return (dbuf_prefetch_fini(dpa, B_TRUE));
+ dbuf_prefetch_fini(dpa, B_TRUE);
+ return;
}
ASSERT(zio == NULL || zio->io_error == 0);
/*
* The dpa_dnode is only valid if we are called with a NULL
* zio. This indicates that the arc_read() returned without
* first calling zio_read() to issue a physical read. Once
* a physical read is made the dpa_dnode must be invalidated
* as the locks guarding it may have been dropped. If the
* dpa_dnode is still valid, then we want to add it to the dbuf
* cache. To do so, we must hold the dbuf associated with the block
* we just prefetched, read its contents so that we associate it
* with an arc_buf_t, and then release it.
*/
if (zio != NULL) {
ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
} else {
ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
}
ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
dpa->dpa_dnode = NULL;
} else if (dpa->dpa_dnode != NULL) {
uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel -
dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG);
if (db == NULL) {
arc_buf_destroy(abuf, private);
- return (dbuf_prefetch_fini(dpa, B_TRUE));
+ dbuf_prefetch_fini(dpa, B_TRUE);
+ return;
}
(void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG);
}
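/*
* Descend one level and locate the block pointer inside this indirect
* block that covers the target blkid.
*/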
dpa->dpa_curlevel--;
uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
+ arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
+ return;
} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
dbuf_issue_final_prefetch(dpa, bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
arc_buf_destroy(abuf, private);
}
/*
* Issue prefetch reads for the given block on the given level. If the indirect
* blocks above that block are not in memory, we will read them in
* asynchronously. As a result, this call never blocks waiting for a read to
* complete. Note that the prefetch might fail if the dataset is encrypted and
* the encryption key is unmapped before the IO completes.
*/
int
dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg)
{
blkptr_t bp;
int epbs, nlevels, curlevel;
uint64_t curblkid;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
goto no_issue;
if (level == 0 && dnode_block_freed(dn, blkid))
goto no_issue;
/*
* This dnode hasn't been written to disk yet, so there's nothing to
* prefetch.
*/
nlevels = dn->dn_phys->dn_nlevels;
if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
goto no_issue;
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
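/*
* blkid << (epbs * level) is the first level-0 block covered by the
* target block; if it lies past dn_maxblkid there is nothing on disk
* to prefetch.
*/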
if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
goto no_issue;
dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
level, blkid);
if (db != NULL) {
mutex_exit(&db->db_mtx);
/*
* This dbuf already exists. It is either CACHED, or
* (we assume) about to be read or filled.
*/
goto no_issue;
}
/*
* Find the closest ancestor (indirect block) of the target block
* that is present in the cache. In this indirect block, we will
* find the bp that is at curlevel, curblkid.
*/
curlevel = level;
curblkid = blkid;
while (curlevel < nlevels - 1) {
int parent_level = curlevel + 1;
uint64_t parent_blkid = curblkid >> epbs;
dmu_buf_impl_t *db;
if (dbuf_hold_impl(dn, parent_level, parent_blkid,
FALSE, TRUE, FTAG, &db) == 0) {
blkptr_t *bpp = db->db_buf->b_data;
bp = bpp[P2PHASE(curblkid, 1 << epbs)];
dbuf_rele(db, FTAG);
break;
}
curlevel = parent_level;
curblkid = parent_blkid;
}
if (curlevel == nlevels - 1) {
/* No cached indirect blocks found. */
ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
bp = dn->dn_phys->dn_blkptr[curblkid];
}
ASSERT(!BP_IS_REDACTED(&bp) ||
dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
goto no_issue;
ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
ZIO_FLAG_CANFAIL);
dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, level, blkid);
dpa->dpa_curlevel = curlevel;
dpa->dpa_prio = prio;
dpa->dpa_aflags = aflags;
dpa->dpa_spa = dn->dn_objset->os_spa;
dpa->dpa_dnode = dn;
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
dpa->dpa_cb = cb;
dpa->dpa_arg = arg;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dnode_level_is_l2cacheable(&bp, dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
* a higher level, though, we want to issue the prefetches for all the
* indirect blocks asynchronously, so we can go on with whatever we were
* doing.
*/
if (curlevel == level) {
ASSERT3U(curblkid, ==, blkid);
dbuf_issue_final_prefetch(dpa, &bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dnode_level_is_l2cacheable(&bp, dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
&bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
/*
* We use pio here instead of dpa_zio since it's possible that
* dpa may have already been freed.
*/
zio_nowait(pio);
return (1);
no_issue:
if (cb != NULL)
cb(arg, level, blkid, B_FALSE);
return (0);
}
int
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t aflags)
{
return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
}
/*
* Helper function for dbuf_hold_impl() to copy a buffer. Handles
* the case of encrypted, compressed and uncompressed buffers by
* allocating the new buffer, respectively, with arc_alloc_raw_buf(),
* arc_alloc_compressed_buf() or arc_alloc_buf().
*
* NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
*/
noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
dbuf_dirty_record_t *dr = db->db_data_pending;
arc_buf_t *data = dr->dt.dl.dr_data;
enum zio_compress compress_type = arc_get_compression(data);
uint8_t complevel = arc_get_complevel(data);
if (arc_is_encrypted(data)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(data, &byteorder, salt, iv, mac);
dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
compress_type, complevel));
} else if (compress_type != ZIO_COMPRESS_OFF) {
dbuf_set_data(db, arc_alloc_compressed_buf(
dn->dn_objset->os_spa, db, arc_buf_size(data),
arc_buf_lsize(data), compress_type, complevel));
} else {
dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
rw_enter(&db->db_rwlock, RW_WRITER);
memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
/*
* Returns with db_holds incremented, and db_mtx not held.
* Note: dn_struct_rwlock must be held.
*/
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
const void *tag, dmu_buf_impl_t **dbp)
{
dmu_buf_impl_t *db, *parent = NULL;
/* If the pool has been created, verify the tx_sync_lock is not held */
spa_t *spa = dn->dn_objset->os_spa;
dsl_pool_t *dp = spa->spa_dsl_pool;
if (dp != NULL) {
ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
}
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT3U(dn->dn_nlevels, >, level);
*dbp = NULL;
/* dbuf_find() returns with db_mtx held */
db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
if (db == NULL) {
blkptr_t *bp = NULL;
int err;
if (fail_uncached)
return (SET_ERROR(ENOENT));
ASSERT3P(parent, ==, NULL);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
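/*
* With fail_sparse, a hole is treated as a missing block: report
* ENOENT and drop any parent hold taken by dbuf_findbp().
*/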
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
err = SET_ERROR(ENOENT);
if (err) {
if (parent)
dbuf_rele(parent, NULL);
return (err);
}
}
if (err && err != ENOENT)
return (err);
db = dbuf_create(dn, level, blkid, parent, bp);
}
if (fail_uncached && db->db_state != DB_CACHED) {
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
if (db->db_buf != NULL) {
arc_buf_access(db->db_buf);
ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
}
ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
/*
* If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
db->db_state == DB_CACHED && db->db_data_pending) {
dbuf_dirty_record_t *dr = db->db_data_pending;
if (dr->dt.dl.dr_data == db->db_buf)
dbuf_hold_copy(dn, db);
}
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
(void) zfs_refcount_add(&db->db_holds, tag);
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
/* NOTE: we can't rele the parent until after we drop the db_mtx */
if (parent)
dbuf_rele(parent, NULL);
ASSERT3P(DB_DNODE(db), ==, dn);
ASSERT3U(db->db_blkid, ==, blkid);
ASSERT3U(db->db_level, ==, level);
*dbp = db;
return (0);
}
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_bonus == NULL);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
if (db->db_blkid != DMU_SPILL_BLKID)
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
dbuf_new_size(db, blksz, tx);
return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
{
int64_t holds = zfs_refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
}
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
const void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
boolean_t result = B_FALSE;
if (blkid == DMU_BONUS_BLKID)
found_db = dbuf_find_bonus(os, obj);
else
found_db = dbuf_find(os, obj, 0, blkid);
if (found_db != NULL) {
if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
(void) zfs_refcount_add(&db->db_holds, tag);
result = B_TRUE;
}
mutex_exit(&found_db->db_mtx);
}
return (result);
}
/*
* If you call dbuf_rele() you had better not be referencing the dnode handle
* unless you have some other direct or indirect hold on the dnode. (An indirect
* hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
* Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
* dnode's parent dbuf evicting its dnode handles.
*/
void
dbuf_rele(dmu_buf_impl_t *db, const void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
}
void
dmu_buf_rele(dmu_buf_t *db, const void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically. The 'evicting'
* argument should be set if we are already in the dbuf-evicting code
* path, in which case we don't want to recursively evict. This allows us to
* avoid deeply nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
{
int64_t holds;
uint64_t size;
ASSERT(MUTEX_HELD(&db->db_mtx));
DBUF_VERIFY(db);
/*
* Remove the reference to the dbuf before removing its hold on the
* dnode so we can guarantee in dnode_move() that a referenced bonus
* buffer has a corresponding dnode hold.
*/
holds = zfs_refcount_remove(&db->db_holds, tag);
ASSERT(holds >= 0);
/*
* We can't freeze indirects if there is a possibility that they
* may be modified in the current syncing context.
*/
if (db->db_buf != NULL &&
holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
arc_buf_freeze(db->db_buf);
}
if (holds == db->db_dirtycnt &&
db->db_level == 0 && db->db_user_immediate_evict)
dbuf_evict_user(db);
if (holds == 0) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn;
boolean_t evict_dbuf = db->db_pending_evict;
/*
* If the dnode moves here, we cannot cross this
* barrier until the move completes.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
atomic_dec_32(&dn->dn_dbufs_count);
/*
* Decrementing the dbuf count means that the bonus
* buffer's dnode hold is no longer discounted in
* dnode_move(). The dnode cannot move until after
* the dnode_rele() below.
*/
DB_DNODE_EXIT(db);
/*
* Do not reference db after its lock is dropped.
* Another thread may evict it.
*/
mutex_exit(&db->db_mtx);
if (evict_dbuf)
dnode_evict_bonus(dn);
dnode_rele(dn, db);
} else if (db->db_buf == NULL) {
/*
* This is a special case: we never associated this
* dbuf with any data allocated from the ARC.
*/
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
dbuf_destroy(db);
} else if (arc_released(db->db_buf)) {
/*
* This dbuf has anonymous data associated with it.
*/
dbuf_destroy(db);
} else {
boolean_t do_arc_evict = B_FALSE;
blkptr_t bp;
spa_t *spa = dmu_objset_spa(db->db_objset);
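/*
* If this dbuf is not cacheable, remember its BP now so that we can
* notify the ARC via arc_freed() once we are done with the dbuf.
*/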
if (!DBUF_IS_CACHEABLE(db) &&
db->db_blkptr != NULL &&
!BP_IS_HOLE(db->db_blkptr) &&
!BP_IS_EMBEDDED(db->db_blkptr)) {
do_arc_evict = B_TRUE;
bp = *db->db_blkptr;
}
if (!DBUF_IS_CACHEABLE(db) ||
db->db_pending_evict) {
dbuf_destroy(db);
} else if (!multilist_link_active(&db->db_cache_link)) {
ASSERT3U(db->db_caching_status, ==,
DB_NO_CACHE);
dbuf_cached_state_t dcs =
dbuf_include_in_metadata_cache(db) ?
DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
db->db_caching_status = dcs;
multilist_insert(&dbuf_caches[dcs].cache, db);
uint64_t db_size = db->db.db_size;
size = zfs_refcount_add_many(
&dbuf_caches[dcs].size, db_size, db);
uint8_t db_level = db->db_level;
mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMP(metadata_cache_count);
DBUF_STAT_MAX(
metadata_cache_size_bytes_max,
size);
} else {
DBUF_STAT_BUMP(cache_count);
DBUF_STAT_MAX(cache_size_bytes_max,
size);
DBUF_STAT_BUMP(cache_levels[db_level]);
DBUF_STAT_INCR(
cache_levels_bytes[db_level],
db_size);
}
if (dcs == DB_DBUF_CACHE && !evicting)
dbuf_evict_notify(size);
}
if (do_arc_evict)
arc_freed(spa, &bp);
}
} else {
mutex_exit(&db->db_mtx);
}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
return (zfs_refcount_count(&db->db_holds));
}
uint64_t
dmu_buf_user_refcount(dmu_buf_t *db_fake)
{
uint64_t holds;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
mutex_exit(&db->db_mtx);
return (holds);
}
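/*
* Compare-and-swap the dbuf's user: db_user is set to new_user only if
* it currently equals old_user. Returns the user that was set at the
* time of the call.
*/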
void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
dmu_buf_user_t *new_user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
dbuf_verify_user(db, DBVU_NOT_EVICTING);
if (db->db_user == old_user)
db->db_user = new_user;
else
old_user = db->db_user;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
mutex_exit(&db->db_mtx);
return (old_user);
}
void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, NULL, user));
}
void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_user_immediate_evict = TRUE;
return (dmu_buf_set_user(db_fake, user));
}
void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, user, NULL));
}
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
return (db->db_user);
}
void
dmu_buf_user_evict_wait(void)
{
taskq_wait(dbu_evict_taskq);
}
blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_blkptr);
}
objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_objset);
}
dnode_t *
dmu_buf_dnode_enter(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_ENTER(dbi);
return (DB_DNODE(dbi));
}
void
dmu_buf_dnode_exit(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_EXIT(dbi);
}
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
/* ASSERT(dmu_tx_is_syncing(tx)) */
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_blkptr != NULL)
return;
if (db->db_blkid == DMU_SPILL_BLKID) {
db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
BP_ZERO(db->db_blkptr);
return;
}
if (db->db_level == dn->dn_phys->dn_nlevels-1) {
/*
* This buffer was allocated at a time when there were
* no available blkptrs from the dnode, or it was
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
} else {
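/*
* The parent is an indirect block: look it up if it was never
* attached, then point db_blkptr at our slot within it.
*/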
dmu_buf_impl_t *parent = db->db_parent;
int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT(dn->dn_phys->dn_nlevels > 1);
if (parent == NULL) {
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, db);
rw_exit(&dn->dn_struct_rwlock);
mutex_enter(&db->db_mtx);
db->db_parent = parent;
}
db->db_blkptr = (blkptr_t *)parent->db.db_data +
(db->db_blkid & ((1ULL << epbs) - 1));
DBUF_VERIFY(db);
}
}
static void
dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
void *data = dr->dt.dl.dr_data;
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_blkid == DMU_BONUS_BLKID);
ASSERT(data != NULL);
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
dbuf_undirty_bonus(dr);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}
/*
* When syncing out a block of dnodes, adjust the block to deal with
* encryption. Normally, we make sure the block is decrypted before writing
* it. If we have crypt params, then we are writing a raw (encrypted) block,
* from a raw receive. In this case, set the ARC buf's crypt params so
* that the BP will be filled with the correct byteorder, salt, iv, and mac.
*/
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
int err;
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
/*
* Unfortunately, there is currently no mechanism for
* syncing context to handle decryption errors. An error
* here is only possible if an attacker maliciously
* changed a dnode block and updated the associated
* checksums going up the block tree.
*/
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
err = arc_untransform(db->db_buf, db->db_objset->os_spa,
&zb, B_TRUE);
if (err)
panic("Invalid dnode block MAC");
} else if (dr->dt.dl.dr_has_raw_params) {
(void) arc_release(dr->dt.dl.dr_data, db);
arc_convert_to_raw(dr->dt.dl.dr_data,
dmu_objset_id(db->db_objset),
dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
}
}
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
* is critical that we not allow the compiler to inline this function into
* dbuf_sync_list(), thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
ASSERT(db->db_level > 0);
DBUF_VERIFY(db);
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
ASSERT3U(db->db_state, ==, DB_CACHED);
ASSERT(db->db_buf != NULL);
/* Indirect block size must match what the dnode thinks it is. */
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
dbuf_check_blkptr(dn, db);
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, db->db_buf, tx);
zio_t *zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
/*
* Verify that the size of the data in our bonus buffer does not exceed
* its recorded size.
*
* The purpose of this verification is to catch any cases in development
* where the size of a phys structure (i.e., space_map_phys_t) grows and,
* due to incorrect feature management, older pools expect to read more
* data even though they didn't actually write it to begin with.
*
* For example, this would catch an error in the feature logic where we
* open an older pool and we expect to write the space map histogram of
* a space map with size SPACE_MAP_SIZE_V0.
*/
static void
dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
{
#ifdef ZFS_DEBUG
dnode_t *dn = dr->dr_dnode;
/*
* Encrypted bonus buffers can have data past their bonuslen.
* Skip the verification of these blocks.
*/
if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
return;
uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(bonuslen, <=, maxbonuslen);
arc_buf_t *datap = dr->dt.dl.dr_data;
char *datap_end = ((char *)datap) + bonuslen;
char *datap_max = ((char *)datap) + maxbonuslen;
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
ASSERT(*datap_end == 0);
#endif
}
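/*
* Lightweight dirty records carry no dbuf (dr_dbuf == NULL); the data to
* write is held in an ABD (dr->dt.dll.dr_abd) and the block pointer to
* update lives either directly in the dnode (single-level objects) or in
* the parent level-1 indirect dbuf.
*/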
static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
ASSERT3P(dr->dr_dbuf, ==, NULL);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
} else {
dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
VERIFY3U(parent_db->db_level, ==, 1);
VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
blkptr_t *bp = parent_db->db.db_data;
return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
}
}
static void
dbuf_lightweight_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
blkptr_t *bp = zio->io_bp;
if (zio->io_error != 0)
return;
dnode_t *dn = dr->dr_dnode;
blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
spa_t *spa = dmu_objset_spa(dn->dn_objset);
int64_t delta = bp_get_dsize_sync(spa, bp) -
bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta);
uint64_t blkid = dr->dt.dll.dr_blkid;
mutex_enter(&dn->dn_mtx);
if (blkid > dn->dn_phys->dn_maxblkid) {
ASSERT0(dn->dn_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = blkid;
}
mutex_exit(&dn->dn_mtx);
if (!BP_IS_EMBEDDED(bp)) {
uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
BP_SET_FILL(bp, fill);
}
dmu_buf_impl_t *parent_db;
EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
if (dr->dr_parent == NULL) {
parent_db = dn->dn_dbuf;
} else {
parent_db = dr->dr_parent->dr_dbuf;
}
rw_enter(&parent_db->db_rwlock, RW_WRITER);
*bp_orig = *bp;
rw_exit(&parent_db->db_rwlock);
}
static void
dbuf_lightweight_physdone(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
ASSERT3U(dr->dr_txg, ==, zio->io_txg);
/*
* The callback will be called io_phys_children times. Retire one
* portion of our dirty space each time we are called. Any rounding
* error will be cleaned up by dbuf_lightweight_done().
*/
int delta = dr->dr_accounted / zio->io_phys_children;
dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
static void
dbuf_lightweight_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
VERIFY0(zio->io_error);
objset_t *os = dr->dr_dnode->dn_objset;
dmu_tx_t *tx = os->os_synctx;
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, zio->io_bp, tx);
}
/*
* See comment in dbuf_write_done().
*/
if (zio->io_phys_children == 0) {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted, zio->io_txg);
} else {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted % zio->io_phys_children, zio->io_txg);
}
abd_free(dr->dt.dll.dr_abd);
kmem_free(dr, sizeof (*dr));
}
noinline static void
dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dnode_t *dn = dr->dr_dnode;
zio_t *pio;
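/*
* Parent the write under the dnode's zio for single-level objects,
* otherwise under the zio of the parent indirect's dirty record.
*/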
if (dn->dn_phys->dn_nlevels == 1) {
pio = dn->dn_zio;
} else {
pio = dr->dr_parent->dr_zio;
}
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(dn->dn_objset),
.zb_object = dn->dn_object,
.zb_level = 0,
.zb_blkid = dr->dt.dll.dr_blkid,
};
/*
* See comment in dbuf_write(). This is so that zio->io_bp_orig
* will have the old BP in dbuf_lightweight_done().
*/
dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
&dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
zio_nowait(dr->dr_zio);
}
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
* critical that we not allow the compiler to inline this function into
* dbuf_sync_list(), thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
uint64_t txg = tx->tx_txg;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
/*
* To be synced, we must be dirtied. But we
* might have been freed after the dirty.
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
ASSERT(db->db.db_data == NULL);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
}
DBUF_VERIFY(db);
if (db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
/*
* In the previous transaction group, the bonus buffer
* was entirely used to store the attributes for the
* dnode which overrode the dn_spill field. However,
* when adding more attributes to the file a spill
* block was required to hold the extra attributes.
*
* Make sure to clear the garbage left in the dn_spill
* field from the previous attributes in the bonus
* buffer. Otherwise, after writing out the spill
* block to the newly allocated dva, it will free
* the old block pointed to by the invalid dn_spill.
*/
db->db_blkptr = NULL;
}
dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/*
* If this is a bonus buffer, simply copy the bonus data into the
* dnode. It will be written out when the dnode is synced (and it
* will be synced, since it must have been dirty for dbuf_sync to
* be called).
*/
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dr->dr_dbuf == db);
dbuf_sync_bonus(dr, tx);
return;
}
os = dn->dn_objset;
/*
* This function may have dropped the db_mtx lock allowing a dmu_sync
* operation to sneak in. As a result, we need to ensure that we
* don't check the dr_override_state until we have returned from
* dbuf_check_blkptr.
*/
dbuf_check_blkptr(dn, db);
/*
* If this buffer is in the middle of an immediate write,
* wait for the synchronous IO to complete.
*/
while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
}
/*
* If this is a dnode block, ensure it is appropriately encrypted
* or decrypted, depending on what we are writing to it this txg.
*/
if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
dbuf_prepare_encrypted_dnode_leaf(dr);
if (db->db_state != DB_NOFILL &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
zfs_refcount_count(&db->db_holds) > 1 &&
dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
*datap == db->db_buf) {
/*
* If this buffer is currently "in use" (i.e., there
* are active holds and db_data still references it),
* then make a copy before we start the write so that
* any modifications from the open txg will not leak
* into this write.
*
* NOTE: this copy does not need to be made for
* objects only modified in the syncing context (e.g.
* DNODE blocks).
*/
int psize = arc_buf_size(*datap);
int lsize = arc_buf_lsize(*datap);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
enum zio_compress compress_type = arc_get_compression(*datap);
uint8_t complevel = arc_get_complevel(*datap);
if (arc_is_encrypted(*datap)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
*datap = arc_alloc_raw_buf(os->os_spa, db,
dmu_objset_id(os), byteorder, salt, iv, mac,
dn->dn_type, psize, lsize, compress_type,
complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type, complevel);
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
memcpy((*datap)->b_data, db->db.db_data, psize);
}
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
} else {
zio_nowait(dr->dr_zio);
}
}
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
* are processing the meta-dnode, and we have finished.
* The dbufs for all dnodes are put back on the list
* during processing, so that we can zio_wait()
* these IOs after initiating all child IOs.
*/
ASSERT3U(dr->dr_dbuf->db.db_object, ==,
DMU_META_DNODE_OBJECT);
break;
}
list_remove(list, dr);
if (dr->dr_dbuf == NULL) {
dbuf_sync_lightweight(dr, tx);
} else {
if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
VERIFY3U(dr->dr_dbuf->db_level, ==, level);
}
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
}
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
spa_t *spa = zio->io_spa;
int64_t delta;
uint64_t fill = 0;
int i;
ASSERT3P(db->db_blkptr, !=, NULL);
ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
zio->io_prev_space_delta = delta;
if (bp->blk_birth != 0) {
ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_type) ||
(db->db_blkid == DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_bonustype) ||
BP_IS_EMBEDDED(bp));
ASSERT(BP_GET_LEVEL(bp) == db->db_level);
}
mutex_enter(&db->db_mtx);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(bp)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
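/*
* Compute this block's fill count: for a block of dnodes, the number of
* allocated dnodes (skipping the extra slots of large dnodes); for other
* level-0 blocks, 0 or 1; for indirect blocks, the sum of the children's
* fill counts.
*/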
if (db->db_level == 0) {
mutex_enter(&dn->dn_mtx);
if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
db->db_blkid != DMU_SPILL_BLKID) {
ASSERT0(db->db_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = db->db_blkid;
}
mutex_exit(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_DNODE) {
i = 0;
while (i < db->db.db_size) {
dnode_phys_t *dnp =
(void *)(((char *)db->db.db_data) + i);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE) {
fill++;
i += dnp->dn_extra_slots *
DNODE_MIN_SIZE;
}
}
} else {
if (BP_IS_HOLE(bp)) {
fill = 0;
} else {
fill = 1;
}
}
} else {
blkptr_t *ibp = db->db.db_data;
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
if (BP_IS_HOLE(ibp))
continue;
fill += BP_GET_FILL(ibp);
}
}
DB_DNODE_EXIT(db);
if (!BP_IS_EMBEDDED(bp))
BP_SET_FILL(bp, fill);
mutex_exit(&db->db_mtx);
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
*db->db_blkptr = *bp;
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block comprised of only
* holes, then we want this indirect to be compressed away to a hole. In
* order to do that we must zero out any information about the holes that
* this indirect points to before we try to compress it.
*/
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) zio, (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp;
unsigned int epbs, i;
ASSERT3U(db->db_level, >, 0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
/* Determine if all our children are holes */
for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
if (!BP_IS_HOLE(bp))
break;
}
/*
* If all the children are holes, then zero them all out so that
* we may get compressed away.
*/
if (i == 1ULL << epbs) {
/*
* We only found holes. Grab the rwlock to prevent
* anybody from reading the blocks we're about to
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
}
/*
* The SPA will call this callback several times for each zio - once
* for every physical child i/o (zio->io_phys_children times). This
* allows the DMU to monitor the progress of each logical i/o. For example,
* there may be 2 copies of an indirect block, or many fragments of a RAID-Z
* block. There may be a long delay before all copies/fragments are completed,
* so this callback allows us to retire dirty space gradually, as the physical
* i/os complete.
*/
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
(void) buf;
dmu_buf_impl_t *db = arg;
objset_t *os = db->db_objset;
dsl_pool_t *dp = dmu_objset_pool(os);
dbuf_dirty_record_t *dr;
int delta = 0;
dr = db->db_data_pending;
ASSERT3U(dr->dr_txg, ==, zio->io_txg);
/*
* The callback will be called io_phys_children times. Retire one
* portion of our dirty space each time we are called. Any rounding
* error will be cleaned up by dbuf_write_done().
*/
delta = dr->dr_accounted / zio->io_phys_children;
dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
objset_t *os = db->db_objset;
dmu_tx_t *tx = os->os_synctx;
ASSERT0(zio->io_error);
ASSERT(db->db_blkptr == bp);
/*
* For nopwrites and rewrites we ensure that the bp matches our
* original and bypass all the accounting.
*/
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
dbuf_dirty_record_t *dr = db->db_data_pending;
dnode_t *dn = dr->dr_dnode;
ASSERT(!list_link_active(&dr->dr_dirty_node));
ASSERT(dr->dr_dbuf == db);
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
if (db->db_state != DB_NOFILL) {
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
} else {
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_blkid, <=,
dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
}
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
cv_broadcast(&db->db_changed);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
/*
* If we didn't do a physical write in this ZIO and we
* still ended up here, it means that the space of the
* dbuf that we just released (and undirtied) above hasn't
* been marked as undirtied in the pool's accounting.
*
* Thus, we undirty that space in the pool's view of the
* world here. For physical writes this type of update
* happens in dbuf_write_physdone().
*
* If we did a physical write, cleanup any rounding errors
* that came up due to writing multiple copies of a block
* on disk [see dbuf_write_physdone()].
*/
if (zio->io_phys_children == 0) {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted, zio->io_txg);
} else {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted % zio->io_phys_children, zio->io_txg);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
}
static void
dbuf_write_nofill_ready(zio_t *zio)
{
dbuf_write_ready(zio, NULL, zio->io_private);
}
static void
dbuf_write_nofill_done(zio_t *zio)
{
dbuf_write_done(zio, NULL, zio->io_private);
}
static void
dbuf_write_override_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
dbuf_write_ready(zio, NULL, db);
}
static void
dbuf_write_override_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
mutex_enter(&db->db_mtx);
if (!BP_EQUAL(zio->io_bp, obp)) {
if (!BP_IS_HOLE(obp))
dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
arc_release(dr->dt.dl.dr_data, db);
}
mutex_exit(&db->db_mtx);
dbuf_write_done(zio, NULL, db);
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
typedef struct dbuf_remap_impl_callback_arg {
objset_t *drica_os;
uint64_t drica_blk_birth;
dmu_tx_t *drica_tx;
} dbuf_remap_impl_callback_arg_t;
static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg)
{
dbuf_remap_impl_callback_arg_t *drica = arg;
objset_t *os = drica->drica_os;
spa_t *spa = dmu_objset_spa(os);
dmu_tx_t *tx = drica->drica_tx;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (os == spa_meta_objset(spa)) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
size, drica->drica_blk_birth, tx);
}
}
static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
blkptr_t bp_copy = *bp;
spa_t *spa = dmu_objset_spa(dn->dn_objset);
dbuf_remap_impl_callback_arg_t drica;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
drica.drica_os = dn->dn_objset;
drica.drica_blk_birth = bp->blk_birth;
drica.drica_tx = tx;
if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
&drica)) {
/*
* If the blkptr being remapped is tracked by a livelist,
* then we need to make sure the livelist reflects the update.
* First, cancel out the old blkptr by appending a 'FREE'
* entry. Next, add an 'ALLOC' to track the new version. This
* way we avoid trying to free an inaccurate blkptr at delete.
* Note that embedded blkptrs are not tracked in livelists.
*/
if (dn->dn_objset != spa_meta_objset(spa)) {
dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg) {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees,
bp);
bplist_append(&ds->ds_dir->dd_pending_allocs,
&bp_copy);
}
}
/*
* The db_rwlock prevents dbuf_read_impl() from
* dereferencing the BP while we are changing it. To
* avoid lock contention, only grab it when we are actually
* changing the BP.
*/
if (rw != NULL)
rw_enter(rw, RW_WRITER);
*bp = bp_copy;
if (rw != NULL)
rw_exit(rw);
}
}
/*
* Remap any existing BP's to concrete vdevs, if possible.
*/
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(db->db_objset);
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
return;
if (db->db_level > 0) {
blkptr_t *bp = db->db.db_data;
for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
}
} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
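/*
* A block of the meta-dnode: remap the block pointers of each dnode it
* holds, stepping over the extra slots occupied by large dnodes.
*/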
dnode_phys_t *dnp = db->db.db_data;
ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
DMU_OT_DNODE);
for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
i += dnp[i].dn_extra_slots + 1) {
for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
&dn->dn_dbuf->db_rwlock);
dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
tx);
}
}
}
}
/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
dmu_buf_impl_t *parent = db->db_parent;
uint64_t txg = tx->tx_txg;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *pio; /* parent I/O */
int wp_flag = 0;
ASSERT(dmu_tx_is_syncing(tx));
os = dn->dn_objset;
if (db->db_state != DB_NOFILL) {
if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
/*
* Private object buffers are released here rather
* than in dbuf_dirty() since they are only modified
* in the syncing context and we don't want the
* overhead of making multiple copies of the data.
*/
if (BP_IS_HOLE(db->db_blkptr)) {
arc_buf_thaw(data);
} else {
dbuf_release_bp(db);
}
dbuf_remap(dn, db, tx);
}
}
if (parent != dn->dn_dbuf) {
/* Our parent is an indirect block. */
/* We have a dirty parent that has been scheduled for write. */
ASSERT(parent && parent->db_data_pending);
/* Our parent's buffer is one level closer to the dnode. */
ASSERT(db->db_level == parent->db_level-1);
/*
* We're about to modify our parent's db_data by modifying
* our block pointer, so the parent must be released.
*/
ASSERT(arc_released(parent->db_buf));
pio = parent->db_data_pending->dr_zio;
} else {
/* Our parent is the dnode itself. */
ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
db->db_blkid != DMU_SPILL_BLKID) ||
(db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
pio = dn->dn_zio;
}
ASSERT(db->db_level == 0 || data == db->db_buf);
ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
ASSERT(pio);
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
db->db.db_object, db->db_level, db->db_blkid);
if (db->db_blkid == DMU_SPILL_BLKID)
wp_flag = WP_SPILL;
wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
/*
* We copy the blkptr now (rather than when we instantiate the dirty
* record), because its value can change between open context and
* syncing context. We do not need to hold dn_struct_rwlock to read
* db_blkptr because we are in syncing context.
*/
dr->dr_bp_copy = *db->db_blkptr;
if (db->db_level == 0 &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* The BP for this block has been provided by open context
* (by dmu_sync() or dmu_buf_write_embedded()).
*/
abd_t *contents = (data != NULL) ?
abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
contents, db->db.db_size, db->db.db_size, &zp,
dbuf_write_override_ready, NULL, NULL,
dbuf_write_override_done,
dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
mutex_enter(&db->db_mtx);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
mutex_exit(&db->db_mtx);
} else if (db->db_state == DB_NOFILL) {
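/*
* NOFILL dbufs carry no data: issue a data-less write (NULL ABD with
* ZIO_FLAG_NODATA); checksumming must be off or noparity here.
*/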
ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
dr->dr_zio = zio_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
dbuf_write_nofill_ready, NULL, NULL,
dbuf_write_nofill_done, db,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
} else {
ASSERT(arc_released(data));
/*
* For indirect blocks, we want to setup the children
* ready callback so that we can properly handle an indirect
* block that only contains holes.
*/
arc_write_done_func_t *children_ready_cb = NULL;
if (db->db_level != 0)
children_ready_cb = dbuf_write_children_ready;
dr->dr_zio = arc_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, data, dbuf_is_l2cacheable(db),
&zp, dbuf_write_ready,
children_ready_cb, dbuf_write_physdone,
dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED, &zb);
}
}
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW,
"Maximum size in bytes of the dbuf cache.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
"Maximum size in bytes of dbuf metadata cache.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
"Set size of dbuf cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
"Set size of dbuf metadata cache to log2 fraction of arc size.");
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index b9e16f79efc5..4c20afcdb9c6 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -1,3088 +1,3078 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_recv.h>
#include <sys/zfs_project.h>
#include "zfs_namecheck.h"
#include <sys/vdev_impl.h>
#include <sys/arc.h>
/*
* Needed to close a window in dnode_move() that allows the objset to be freed
* before it can be safely accessed.
*/
krwlock_t os_lock;
/*
* Tunable to override the maximum number of threads for the parallelization
* of dmu_objset_find_dp, needed to speed up the import of pools with many
* datasets.
* Default is 4 times the number of leaf vdevs.
*/
static const int dmu_find_threads = 0;
/*
* Backfill lower metadnode objects after this many have been freed.
* Backfilling negatively impacts object creation rates, so only do it
* if there are enough holes to fill.
*/
static const int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;
static const char *upgrade_tag = "upgrade_tag";
static void dmu_objset_find_dp_cb(void *arg);
static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb);
static void dmu_objset_upgrade_stop(objset_t *os);
void
dmu_objset_init(void)
{
rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
void
dmu_objset_fini(void)
{
rw_destroy(&os_lock);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
return (os->os_spa);
}
zilog_t *
dmu_objset_zil(objset_t *os)
{
return (os->os_zil);
}
dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
dsl_dataset_t *ds;
if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
return (ds->ds_dir->dd_pool);
else
return (spa_get_dsl(os->os_spa));
}
dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
return (os->os_dsl_dataset);
}
dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
return (os->os_phys->os_type);
}
void
dmu_objset_name(objset_t *os, char *buf)
{
dsl_dataset_name(os->os_dsl_dataset, buf);
}
uint64_t
dmu_objset_id(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
return (ds ? ds->ds_object : 0);
}
uint64_t
dmu_objset_dnodesize(objset_t *os)
{
return (os->os_dnodesize);
}
zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
return (os->os_sync);
}
zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}
static void
compression_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval != ZIO_COMPRESS_INHERIT);
os->os_compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ALGO(newval), ZIO_COMPRESS_ON);
os->os_complevel = zio_complevel_select(os->os_spa, os->os_compress,
ZIO_COMPRESS_LEVEL(newval), ZIO_COMPLEVEL_DEFAULT);
}
static void
copies_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval > 0);
ASSERT(newval <= spa_max_replication(os->os_spa));
os->os_copies = newval;
}
static void
dedup_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
spa_t *spa = os->os_spa;
enum zio_checksum checksum;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_primary_cache = newval;
}
static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_secondary_cache = newval;
}
static void
sync_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
newval == ZFS_SYNC_DISABLED);
os->os_sync = newval;
if (os->os_zil)
zil_set_sync(os->os_zil, newval);
}
static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
newval == ZFS_REDUNDANT_METADATA_MOST);
os->os_redundant_metadata = newval;
}
static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
switch (newval) {
case ZFS_DNSIZE_LEGACY:
os->os_dnodesize = DNODE_MIN_SIZE;
break;
case ZFS_DNSIZE_AUTO:
/*
* Choose a dnode size that will work well for most
* workloads if the user specified "auto". Future code
* improvements could dynamically select a dnode size
* based on observed workload patterns.
*/
os->os_dnodesize = DNODE_MIN_SIZE * 2;
break;
case ZFS_DNSIZE_1K:
case ZFS_DNSIZE_2K:
case ZFS_DNSIZE_4K:
case ZFS_DNSIZE_8K:
case ZFS_DNSIZE_16K:
os->os_dnodesize = newval;
break;
}
}
static void
smallblk_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval <= SPA_MAXBLOCKSIZE);
ASSERT(ISP2(newval));
os->os_zpl_special_smallblock = newval;
}
static void
logbias_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
newval == ZFS_LOGBIAS_THROUGHPUT);
os->os_logbias = newval;
if (os->os_zil)
zil_set_logbias(os->os_zil, newval);
}
static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
os->os_recordsize = newval;
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
objset_phys_t *osp = buf;
ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
size == sizeof (objset_phys_t));
dnode_byteswap(&osp->os_meta_dnode);
byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
osp->os_type = BSWAP_64(osp->os_type);
osp->os_flags = BSWAP_64(osp->os_flags);
if (size >= OBJSET_PHYS_SIZE_V2) {
dnode_byteswap(&osp->os_userused_dnode);
dnode_byteswap(&osp->os_groupused_dnode);
if (size >= sizeof (objset_phys_t))
dnode_byteswap(&osp->os_projectused_dnode);
}
}
/*
* The hash is a CRC-based hash of the objset_t pointer and the object number.
*/
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
uintptr_t osv = (uintptr_t)os;
uint64_t crc = -1ULL;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
/*
* The low 6 bits of the pointer don't have much entropy, because
* the objset_t is larger than 2^6 bytes long.
*/
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];
crc ^= (osv>>14) ^ (obj>>24);
return (crc);
}
static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
dnode_t *dn = obj;
/*
* The low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power-of-two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64-bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dnode_hash(dn->dn_objset, dn->dn_object) %
multilist_get_num_sublists(ml));
}
static inline boolean_t
dmu_os_is_l2cacheable(objset_t *os)
{
vdev_t *vd = NULL;
zfs_cache_type_t cache = os->os_secondary_cache;
blkptr_t *bp = os->os_rootbp;
if (bp != NULL && !BP_IS_HOLE(bp)) {
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = os->os_spa->spa_root_vdev;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (cache == ZFS_CACHE_ALL || cache == ZFS_CACHE_METADATA) {
if (vd == NULL)
return (B_TRUE);
if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
l2arc_exclude_special == 0)
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Instantiates the objset_t in-memory structure corresponding to the
* objset_phys_t that's pointed to by the specified blkptr_t.
*/
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
objset_t **osp)
{
objset_t *os;
int i, err;
ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
ASSERT(!BP_IS_REDACTED(bp));
/*
* We need the pool config lock to get properties.
*/
ASSERT(ds == NULL || dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* The $ORIGIN dataset (if it exists) doesn't have an associated
* objset, so there's no reason to open it. The $ORIGIN dataset
* will not exist on pools older than SPA_VERSION_ORIGIN.
*/
if (ds != NULL && spa_get_dsl(spa) != NULL &&
spa_get_dsl(spa)->dp_origin_snap != NULL) {
ASSERT3P(ds->ds_dir, !=,
spa_get_dsl(spa)->dp_origin_snap->ds_dir);
}
os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
os->os_dsl_dataset = ds;
os->os_spa = spa;
os->os_rootbp = bp;
if (!BP_IS_HOLE(os->os_rootbp)) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
int size;
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (dmu_os_is_l2cacheable(os))
aflags |= ARC_FLAG_L2CACHE;
if (ds != NULL && ds->ds_dir->dd_crypto_obj != 0) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT(BP_IS_AUTHENTICATED(bp));
zio_flags |= ZIO_FLAG_RAW;
}
dprintf_bp(os->os_rootbp, "reading %s", "");
err = arc_read(NULL, spa, os->os_rootbp,
arc_getbuf_func, &os->os_phys_buf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (err != 0) {
kmem_free(os, sizeof (objset_t));
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
if (spa_version(spa) < SPA_VERSION_USERSPACE)
size = OBJSET_PHYS_SIZE_V1;
else if (!spa_feature_is_enabled(spa,
SPA_FEATURE_PROJECT_QUOTA))
size = OBJSET_PHYS_SIZE_V2;
else
size = sizeof (objset_phys_t);
/* Increase the blocksize if we are permitted. */
if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
memset(buf->b_data, 0, size);
memcpy(buf->b_data, os->os_phys_buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
os->os_phys_buf = buf;
}
os->os_phys = os->os_phys_buf->b_data;
os->os_flags = os->os_phys->os_flags;
} else {
int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1;
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
memset(os->os_phys, 0, size);
}
/*
* These properties will be filled in by the logic in zfs_get_zplprop()
* when they are queried for the first time.
*/
os->os_version = OBJSET_PROP_UNINITIALIZED;
os->os_normalization = OBJSET_PROP_UNINITIALIZED;
os->os_utf8only = OBJSET_PROP_UNINITIALIZED;
os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED;
/*
* Note: the changed_cb will be called once before the register
* func returns, thus changing the checksum/compression from the
* default (fletcher2/off). Snapshots don't need to know about
* checksum/compression/copies.
*/
if (ds != NULL) {
os->os_encrypted = (ds->ds_dir->dd_crypto_obj != 0);
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
primary_cache_changed_cb, os);
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
secondary_cache_changed_cb, os);
}
if (!ds->ds_is_snapshot) {
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_CHECKSUM),
checksum_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
compression_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COPIES),
copies_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEDUP),
dedup_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_LOGBIAS),
logbias_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SYNC),
sync_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_REDUNDANT_METADATA),
redundant_metadata_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
recordsize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DNODESIZE),
dnodesize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_SPECIAL_SMALL_BLOCKS),
smallblk_changed_cb, os);
}
}
if (err != 0) {
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
kmem_free(os, sizeof (objset_t));
return (err);
}
} else {
/* It's the meta-objset. */
os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
os->os_compress = ZIO_COMPRESS_ON;
os->os_complevel = ZIO_COMPLEVEL_DEFAULT;
os->os_encrypted = B_FALSE;
os->os_copies = spa_max_replication(spa);
os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
os->os_dedup_verify = B_FALSE;
os->os_logbias = ZFS_LOGBIAS_LATENCY;
os->os_sync = ZFS_SYNC_STANDARD;
os->os_primary_cache = ZFS_CACHE_ALL;
os->os_secondary_cache = ZFS_CACHE_ALL;
os->os_dnodesize = DNODE_MIN_SIZE;
}
if (ds == NULL || !ds->ds_is_snapshot)
os->os_zil_header = os->os_phys->os_zil_header;
os->os_zil = zil_alloc(os, &os->os_zil_header);
for (i = 0; i < TXG_SIZE; i++) {
multilist_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[i]),
dnode_multilist_index_func);
}
list_create(&os->os_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_link));
list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
list_link_init(&os->os_evicting_node);
mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
os->os_obj_next_percpu_len = boot_ncpus;
os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
dnode_special_open(os, &os->os_phys->os_meta_dnode,
DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) {
dnode_special_open(os, &os->os_phys->os_userused_dnode,
DMU_USERUSED_OBJECT, &os->os_userused_dnode);
dnode_special_open(os, &os->os_phys->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf))
dnode_special_open(os,
&os->os_phys->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode);
}
mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL);
*osp = os;
return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
int err = 0;
/*
* We need the pool_config lock to manipulate the dsl_dataset_t.
* Even if the dataset is long-held, we need the pool_config lock
* to open the objset, as it needs to get properties.
*/
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_opening_lock);
if (ds->ds_objset == NULL) {
objset_t *os;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
ds, dsl_dataset_get_blkptr(ds), &os);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (err == 0) {
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_objset == NULL);
ds->ds_objset = os;
mutex_exit(&ds->ds_lock);
}
}
*osp = ds->ds_objset;
mutex_exit(&ds->ds_opening_lock);
return (err);
}
/*
* Holds the pool while the objset is held. Therefore only one objset
* can be held at a time.
*/
int
dmu_objset_hold_flags(const char *name, boolean_t decrypt, const void *tag,
objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_pool_hold(name, tag, &dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
}
return (err);
}
int
dmu_objset_hold(const char *name, const void *tag, objset_t **osp)
{
return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
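/*
* A minimal usage sketch (hypothetical caller and dataset name, not part of
* the upstream sources): dmu_objset_hold() pairs with dmu_objset_rele(),
* and per the comment above the pool stays held for as long as the objset
* is held, so the objset should be released promptly:
*
* objset_t *os;
* int err = dmu_objset_hold("pool/fs", FTAG, &os);
* if (err == 0) {
*         ... inspect os ...
*         dmu_objset_rele(os, FTAG);
* }
*/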
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
(void) tag;
int err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
return (err);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
return (SET_ERROR(EINVAL));
} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
return (SET_ERROR(EROFS));
} else if (!readonly && decrypt &&
dsl_dir_incompatible_encryption_version(ds->ds_dir)) {
return (SET_ERROR(EROFS));
}
/* if we are decrypting, we can now check MACs in os->os_phys_buf */
if (decrypt && arc_is_unauthenticated((*osp)->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform((*osp)->os_phys_buf, (*osp)->os_spa,
&zb, B_FALSE);
if (err != 0)
return (err);
ASSERT0(arc_is_unauthenticated((*osp)->os_phys_buf));
}
return (0);
}
/*
* dsl_pool must not be held when this is called.
* Upon successful return, there will be a longhold on the dataset,
* and the dsl_pool will not be held.
*/
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_pool_hold(name, FTAG, &dp);
if (err != 0)
return (err);
err = dsl_dataset_own(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, FTAG);
return (err);
}
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
dsl_pool_rele(dp, FTAG);
return (err);
}
/*
* User accounting requires the dataset to be decrypted and rw.
* We also don't begin user accounting during claiming to help
* speed up pool import times and to keep this txg reserved
* completely for recovery work.
*/
if (!readonly && !dp->dp_spa->spa_claiming &&
(ds->ds_dir->dd_crypto_obj == 0 || decrypt)) {
if (dmu_objset_userobjspace_upgradable(*osp) ||
dmu_objset_projectquota_upgradable(*osp)) {
dmu_objset_id_quota_upgrade(*osp);
} else if (dmu_objset_userused_enabled(*osp)) {
dmu_objset_userspace_upgrade(*osp);
}
}
dsl_pool_rele(dp, FTAG);
return (0);
}
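/*
* A minimal usage sketch (hypothetical caller, not part of the upstream
* sources): per the contract above, a successful dmu_objset_own() leaves a
* longhold on the dataset and no pool hold, so the matching teardown is
* dmu_objset_disown() with the same decrypt setting and tag:
*
* objset_t *os;
* if (dmu_objset_own("pool/fs", DMU_OST_ZFS, B_FALSE, B_FALSE,
*     FTAG, &os) == 0) {
*         ... use os ...
*         dmu_objset_disown(os, B_FALSE, FTAG);
* }
*/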
int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_dataset_own_obj(dp, obj, flags, tag, &ds);
if (err != 0)
return (err);
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
return (err);
}
return (0);
}
void
dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, const void *tag)
{
ds_hold_flags_t flags;
dsl_pool_t *dp = dmu_objset_pool(os);
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag);
dsl_pool_rele(dp, tag);
}
void
dmu_objset_rele(objset_t *os, const void *tag)
{
dmu_objset_rele_flags(os, B_FALSE, tag);
}
/*
* When we are called, os MUST refer to an objset associated with a dataset
* that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
* == tag. We will then release and reacquire ownership of the dataset while
* holding the pool config_rwlock so that no intervening namespace or ownership
* changes can occur.
*
* This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
* release the hold on its dataset and acquire a new one on the dataset of the
* same name so that it can be partially torn down and reconstructed.
*/
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
boolean_t decrypt, const void *tag)
{
dsl_pool_t *dp;
char name[ZFS_MAX_DATASET_NAME_LEN];
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
VERIFY3P(ds, !=, NULL);
VERIFY3P(ds->ds_owner, ==, tag);
VERIFY(dsl_dataset_long_held(ds));
dsl_dataset_name(ds, name);
dp = ds->ds_dir->dd_pool;
dsl_pool_config_enter(dp, FTAG);
dsl_dataset_disown(ds, flags, tag);
VERIFY0(dsl_dataset_own(dp, name, flags, tag, newds));
dsl_pool_config_exit(dp, FTAG);
}
void
dmu_objset_disown(objset_t *os, boolean_t decrypt, const void *tag)
{
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
/*
* Stop upgrading thread
*/
dmu_objset_upgrade_stop(os);
dsl_dataset_disown(os->os_dsl_dataset, flags, tag);
}
void
dmu_objset_evict_dbufs(objset_t *os)
{
dnode_t *dn_marker;
dnode_t *dn;
dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);
mutex_enter(&os->os_lock);
dn = list_head(&os->os_dnodes);
while (dn != NULL) {
/*
* Skip dnodes without holds. We have to do this dance
* because dnode_add_ref() only works if there is already a
* hold. If the dnode has no holds, then it has no dbufs.
*/
if (dnode_add_ref(dn, FTAG)) {
list_insert_after(&os->os_dnodes, dn, dn_marker);
mutex_exit(&os->os_lock);
dnode_evict_dbufs(dn);
dnode_rele(dn, FTAG);
mutex_enter(&os->os_lock);
dn = list_next(&os->os_dnodes, dn_marker);
list_remove(&os->os_dnodes, dn_marker);
} else {
dn = list_next(&os->os_dnodes, dn);
}
}
mutex_exit(&os->os_lock);
kmem_free(dn_marker, sizeof (dnode_t));
if (DMU_USERUSED_DNODE(os) != NULL) {
if (DMU_PROJECTUSED_DNODE(os) != NULL)
dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
}
dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
* Objset eviction processing is split into two pieces.
* The first marks the objset as evicting, evicts any dbufs that
* have a refcount of zero, and then queues up the objset for the
* second phase of eviction. Once os->os_dnodes has been cleared by
* dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
* The second phase closes the special dnodes, dequeues the objset from
* the list of those undergoing eviction, and finally frees the objset.
*
* NOTE: Due to asynchronous eviction processing (invocation of
* dnode_buf_pageout()), it is possible for the meta dnode for the
* objset to have no holds even though os->os_dnodes is not empty.
*/
void
dmu_objset_evict(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!dmu_objset_is_dirty(os, t));
if (ds)
dsl_prop_unregister_all(ds, os);
if (os->os_sa)
sa_tear_down(os);
dmu_objset_evict_dbufs(os);
mutex_enter(&os->os_lock);
spa_evicting_os_register(os->os_spa, os);
if (list_is_empty(&os->os_dnodes)) {
mutex_exit(&os->os_lock);
dmu_objset_evict_done(os);
} else {
mutex_exit(&os->os_lock);
}
}
void
dmu_objset_evict_done(objset_t *os)
{
ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
dnode_special_close(&os->os_meta_dnode);
if (DMU_USERUSED_DNODE(os)) {
if (DMU_PROJECTUSED_DNODE(os))
dnode_special_close(&os->os_projectused_dnode);
dnode_special_close(&os->os_userused_dnode);
dnode_special_close(&os->os_groupused_dnode);
}
zil_free(os->os_zil);
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
/*
* This is a barrier to prevent the objset from going away in
* dnode_move() until we can safely ensure that the objset is still in
* use. We consider the objset valid before the barrier and invalid
* after the barrier.
*/
rw_enter(&os_lock, RW_READER);
rw_exit(&os_lock);
kmem_free(os->os_obj_next_percpu,
os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
mutex_destroy(&os->os_lock);
mutex_destroy(&os->os_userused_lock);
mutex_destroy(&os->os_obj_lock);
mutex_destroy(&os->os_user_ptr_lock);
mutex_destroy(&os->os_upgrade_lock);
for (int i = 0; i < TXG_SIZE; i++)
multilist_destroy(&os->os_dirty_dnodes[i]);
spa_evicting_os_deregister(os->os_spa, os);
kmem_free(os, sizeof (objset_t));
}
inode_timespec_t
dmu_objset_snap_cmtime(objset_t *os)
{
return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
objset_t *
dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, int levels, int blksz, int ibs, dmu_tx_t *tx)
{
objset_t *os;
dnode_t *mdn;
ASSERT(dmu_tx_is_syncing(tx));
if (blksz == 0)
blksz = DNODE_BLOCK_SIZE;
if (ibs == 0)
ibs = DN_MAX_INDBLKSHIFT;
if (ds != NULL)
VERIFY0(dmu_objset_from_ds(ds, &os));
else
VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));
mdn = DMU_META_DNODE(os);
dnode_allocate(mdn, DMU_OT_DNODE, blksz, ibs, DMU_OT_NONE, 0,
DNODE_MIN_SLOTS, tx);
/*
* We don't want to have to increase the meta-dnode's nlevels
* later, because then we could do it in quiescing context while
* we are also accessing it in open context.
*
* This precaution is not necessary for the MOS (ds == NULL),
* because the MOS is only updated in syncing context.
* This is most fortunate: the MOS is the only objset that
* needs to be synced multiple times as spa_sync() iterates
* to convergence, so minimizing its dn_nlevels matters.
*/
if (ds != NULL) {
if (levels == 0) {
levels = 1;
/*
* Determine the number of levels necessary for the
* meta-dnode to contain DN_MAX_OBJECT dnodes. Note
* that in order to ensure that we do not overflow
* 64 bits, there has to be a nlevels that gives us a
* number of blocks > DN_MAX_OBJECT but < 2^64.
* Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)
* (10) must be less than (64 - log2(DN_MAX_OBJECT))
* (16).
*/
while ((uint64_t)mdn->dn_nblkptr <<
(mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) *
(mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
DN_MAX_OBJECT)
levels++;
}
mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
mdn->dn_nlevels = levels;
}
ASSERT(type != DMU_OST_NONE);
ASSERT(type != DMU_OST_ANY);
ASSERT(type < DMU_OST_NUMTYPES);
os->os_phys->os_type = type;
/*
* Enable user accounting if it is enabled and this is not an
* encrypted receive.
*/
if (dmu_objset_userused_enabled(os) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os)) {
ds->ds_feature_activation[
SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
}
if (dmu_objset_projectquota_enabled(os)) {
ds->ds_feature_activation[
SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
}
os->os_flags = os->os_phys->os_flags;
}
dsl_dataset_dirty(ds, tx);
return (os);
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, dmu_tx_t *tx)
{
return (dmu_objset_create_impl_dnstats(spa, ds, bp, type, 0, 0, 0, tx));
}
typedef struct dmu_objset_create_arg {
const char *doca_name;
cred_t *doca_cred;
proc_t *doca_proc;
void (*doca_userfunc)(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
void *doca_userarg;
dmu_objset_type_t doca_type;
uint64_t doca_flags;
dsl_crypto_params_t *doca_dcp;
} dmu_objset_create_arg_t;
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
dsl_dataset_t *parentds;
objset_t *parentos;
const char *tail;
int error;
if (strchr(doca->doca_name, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
if (dataset_nestcheck(doca->doca_name) != 0)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dmu_objset_create_crypt_check(pdd, doca->doca_dcp, NULL);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred, doca->doca_proc);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* can't create below anything but filesystems (e.g. no ZVOLs) */
error = dsl_dataset_hold_obj(pdd->dd_pool,
dsl_dir_phys(pdd)->dd_head_dataset_obj, FTAG, &parentds);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dmu_objset_from_ds(parentds, &parentos);
if (error != 0) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *ds;
uint64_t obj;
blkptr_t *bp;
objset_t *os;
zio_t *rzio;
VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));
obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
doca->doca_cred, doca->doca_dcp, tx);
VERIFY0(dsl_dataset_hold_obj_flags(pdd->dd_pool, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
bp = dsl_dataset_get_blkptr(ds);
os = dmu_objset_create_impl(spa, ds, bp, doca->doca_type, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (doca->doca_userfunc != NULL) {
doca->doca_userfunc(os, doca->doca_userarg,
doca->doca_cred, tx);
}
/*
* The doca_userfunc() may write out some data that needs to be
* encrypted if the dataset is encrypted (specifically the root
* directory). This data must be written out before the encryption
* key mapping is removed by dsl_dataset_rele_flags(). Force the
* I/O to occur immediately by invoking the relevant sections of
* dsl_pool_sync().
*/
if (os->os_encrypted) {
dsl_dataset_t *tmpds = NULL;
boolean_t need_sync_done = B_FALSE;
mutex_enter(&ds->ds_lock);
ds->ds_owner = FTAG;
mutex_exit(&ds->ds_lock);
rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dsl_dataset_sync(ds, rzio, tx);
need_sync_done = B_TRUE;
}
VERIFY0(zio_wait(rzio));
dmu_objset_sync_done(os, tx);
taskq_wait(dp->dp_sync_taskq);
if (txg_list_member(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(spa, ds->ds_key_mapping, ds);
}
rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, rzio, tx);
}
VERIFY0(zio_wait(rzio));
if (need_sync_done) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(spa, ds->ds_key_mapping, ds);
dsl_dataset_sync_done(ds, tx);
}
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
}
spa_history_log_internal_ds(ds, "create", tx, " ");
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
dsl_crypto_params_t *dcp, dmu_objset_create_sync_func_t func, void *arg)
{
dmu_objset_create_arg_t doca;
dsl_crypto_params_t tmp_dcp = { 0 };
doca.doca_name = name;
doca.doca_cred = CRED();
doca.doca_proc = curproc;
doca.doca_flags = flags;
doca.doca_userfunc = func;
doca.doca_userarg = arg;
doca.doca_type = type;
/*
* Some callers (mostly for testing) do not provide a dcp on their
* own but various code inside the sync task will require it to be
* allocated. Rather than adding NULL checks throughout this code
* or adding dummy dcps to all of the callers, we simply create a
* dummy one here and use that. This zero dcp will have the same
* effect as asking for inheritance of all encryption params.
*/
doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp;
int rv = dsl_sync_task(name,
dmu_objset_create_check, dmu_objset_create_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL);
if (rv == 0)
zvol_create_minor(name);
return (rv);
}
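/*
* A minimal usage sketch (hypothetical names, not part of the upstream
* sources): the simplest callers pass a NULL dcp and rely on the dummy
* zeroed dcp described above, which behaves like inheriting all encryption
* parameters from the parent:
*
* int err = dmu_objset_create("pool/newfs", DMU_OST_ZFS, 0,
*     NULL, NULL, NULL);
*/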
typedef struct dmu_objset_clone_arg {
const char *doca_clone;
const char *doca_origin;
cred_t *doca_cred;
proc_t *doca_proc;
} dmu_objset_clone_arg_t;
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_dir_t *pdd;
const char *tail;
int error;
dsl_dataset_t *origin;
dsl_pool_t *dp = dmu_tx_pool(tx);
if (strchr(doca->doca_clone, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred, doca->doca_proc);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EDQUOT));
}
error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* You can only clone snapshots, not the head datasets. */
if (!origin->ds_is_snapshot) {
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EINVAL));
}
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (0);
}
static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *origin, *ds;
uint64_t obj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));
obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
doca->doca_cred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
dsl_dataset_name(origin, namebuf);
spa_history_log_internal_ds(ds, "clone", tx,
"origin=%s (%llu)", namebuf, (u_longlong_t)origin->ds_object);
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_clone(const char *clone, const char *origin)
{
dmu_objset_clone_arg_t doca;
doca.doca_clone = clone;
doca.doca_origin = origin;
doca.doca_cred = CRED();
doca.doca_proc = curproc;
int rv = dsl_sync_task(clone,
dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL);
if (rv == 0)
zvol_create_minor(clone);
return (rv);
}
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
int err;
char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
nvlist_t *snaps = fnvlist_alloc();
fnvlist_add_boolean(snaps, longsnap);
kmem_strfree(longsnap);
err = dsl_dataset_snapshot(snaps, NULL, NULL);
fnvlist_free(snaps);
return (err);
}
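/*
* A minimal usage sketch (hypothetical names, not part of the upstream
* sources): this helper is the single-snapshot convenience wrapper over
* dsl_dataset_snapshot():
*
* int err = dmu_objset_snapshot_one("pool/fs", "mysnap");
*/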
static void
dmu_objset_upgrade_task_cb(void *data)
{
objset_t *os = data;
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = EINTR;
if (!os->os_upgrade_exit) {
int status;
mutex_exit(&os->os_upgrade_lock);
status = os->os_upgrade_cb(os);
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = status;
}
os->os_upgrade_exit = B_TRUE;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
static void
dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb)
{
if (os->os_upgrade_id != 0)
return;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
dsl_dataset_long_hold(dmu_objset_ds(os), upgrade_tag);
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) {
os->os_upgrade_exit = B_FALSE;
os->os_upgrade_cb = cb;
os->os_upgrade_id = taskq_dispatch(
os->os_spa->spa_upgrade_taskq,
dmu_objset_upgrade_task_cb, os, TQ_SLEEP);
if (os->os_upgrade_id == TASKQID_INVALID) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
os->os_upgrade_status = ENOMEM;
}
} else {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
mutex_exit(&os->os_upgrade_lock);
}
static void
dmu_objset_upgrade_stop(objset_t *os)
{
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_exit = B_TRUE;
if (os->os_upgrade_id != 0) {
taskqid_t id = os->os_upgrade_id;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
} else {
mutex_exit(&os->os_upgrade_lock);
}
}
static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
ASSERT(dn->dn_dbuf->db_data_pending);
/*
* Initialize dn_zio outside dnode_sync() because the
* meta-dnode needs to set it outside dnode_sync().
*/
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
ASSERT(dn->dn_zio);
ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
multilist_sublist_remove(list, dn);
/*
* See the comment above dnode_rele_task() for an explanation
* of why this dnode hold is always needed (even when not
* doing user accounting).
*/
multilist_t *newlist = &dn->dn_objset->os_synced_dnodes;
(void) dnode_add_ref(dn, newlist);
multilist_insert(newlist, dn);
dnode_sync(dn, tx);
}
}
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
(void) abuf;
blkptr_t *bp = zio->io_bp;
objset_t *os = arg;
dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
uint64_t fill = 0;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
ASSERT0(BP_GET_LEVEL(bp));
/*
* Update rootbp fill count: it should be the number of objects
* allocated in the object set (not counting the "special"
* objects that are stored in the objset_phys_t -- the meta
* dnode and user/group/project accounting objects).
*/
for (int i = 0; i < dnp->dn_nblkptr; i++)
fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
BP_SET_FILL(bp, fill);
if (os->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
*os->os_rootbp = *bp;
if (os->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
(void) abuf;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
objset_t *os = arg;
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
dmu_tx_t *tx = os->os_synctx;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
kmem_free(bp, sizeof (*bp));
}
typedef struct sync_dnodes_arg {
multilist_t *sda_list;
int sda_sublist_idx;
multilist_t *sda_newlist;
dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;
static void
sync_dnodes_task(void *arg)
{
sync_dnodes_arg_t *sda = arg;
multilist_sublist_t *ms =
multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);
dmu_objset_sync_dnodes(ms, sda->sda_tx);
multilist_sublist_unlock(ms);
kmem_free(sda, sizeof (*sda));
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
int txgoff;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *zio;
list_t *list;
dbuf_dirty_record_t *dr;
int num_sublists;
multilist_t *ml;
blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
*blkptr_copy = *os->os_rootbp;
dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", (u_longlong_t)tx->tx_txg);
ASSERT(dmu_tx_is_syncing(tx));
/* XXX the write_done callback should really give us the tx... */
os->os_synctx = tx;
if (os->os_dsl_dataset == NULL) {
/*
* This is the MOS. If we have upgraded,
* spa_max_replication() could change, so reset
* os_copies here.
*/
os->os_copies = spa_max_replication(os->os_spa);
}
/*
* Create the root block IO
*/
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
arc_release(os->os_phys_buf, &os->os_phys_buf);
dmu_write_policy(os, NULL, 0, 0, &zp);
/*
* If we are either claiming the ZIL or doing a raw receive, write
* out the os_phys_buf raw. Neither of these actions will affect the
* MAC at this point.
*/
if (os->os_raw_receive ||
os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT(os->os_encrypted);
arc_convert_to_raw(os->os_phys_buf,
os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER,
DMU_OT_OBJSET, NULL, NULL, NULL);
}
zio = arc_write(pio, os->os_spa, tx->tx_txg,
blkptr_copy, os->os_phys_buf, dmu_os_is_l2cacheable(os),
&zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
- /*
- * In the codepath dsl_dataset_sync()->dmu_objset_sync() we cannot
- * rely on the zio above completing and calling back
- * dmu_objset_write_done()->dsl_dataset_block_born() before
- * dsl_dataset_sync() actually activates feature flags near its end.
- * Decide here if any features need to be activated, before
- * dsl_dataset_sync() completes its run.
- */
- dsl_dataset_feature_set_activation(blkptr_copy, os->os_dsl_dataset);
-
/*
* Sync special dnodes - the parent IO for the sync is the root block
*/
DMU_META_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_META_DNODE(os), tx);
os->os_phys->os_flags = os->os_flags;
if (DMU_USERUSED_DNODE(os) &&
DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_USERUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_USERUSED_DNODE(os), tx);
DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
if (DMU_PROJECTUSED_DNODE(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_PROJECTUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_PROJECTUSED_DNODE(os), tx);
}
txgoff = tx->tx_txg & TXG_MASK;
/*
* We must create the list here because it uses the
* dn_dirty_link[] of this txg. But it may already
* exist because we call dsl_dataset_sync() twice per txg.
*/
if (os->os_synced_dnodes.ml_sublists == NULL) {
multilist_create(&os->os_synced_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[txgoff]),
dnode_multilist_index_func);
} else {
ASSERT3U(os->os_synced_dnodes.ml_offset, ==,
offsetof(dnode_t, dn_dirty_link[txgoff]));
}
ml = &os->os_dirty_dnodes[txgoff];
num_sublists = multilist_get_num_sublists(ml);
for (int i = 0; i < num_sublists; i++) {
if (multilist_sublist_is_empty_idx(ml, i))
continue;
sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
sda->sda_list = ml;
sda->sda_sublist_idx = i;
sda->sda_tx = tx;
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
sync_dnodes_task, sda, 0);
/* callback frees sda */
}
taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
while ((dr = list_head(list)) != NULL) {
ASSERT0(dr->dr_dbuf->db_level);
list_remove(list, dr);
zio_nowait(dr->dr_zio);
}
/* Enable dnode backfill if enough objects have been freed. */
if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
os->os_rescan_dnodes = B_TRUE;
os->os_freed_dnodes = 0;
}
/*
* Free intent log blocks up to this tx.
*/
zil_sync(os->os_zil, tx);
os->os_phys->os_zil_header = os->os_zil_header;
zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
return (!multilist_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]));
}
static file_info_cb_t *file_cbs[DMU_OST_NUMTYPES];
void
dmu_objset_register_type(dmu_objset_type_t ost, file_info_cb_t *cb)
{
file_cbs[ost] = cb;
}
int
dmu_get_file_info(objset_t *os, dmu_object_type_t bonustype, const void *data,
zfs_file_info_t *zfi)
{
file_info_cb_t *cb = file_cbs[os->os_phys->os_type];
if (cb == NULL)
return (EINVAL);
return (cb(bonustype, data, zfi));
}
boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
file_cbs[os->os_phys->os_type] != NULL &&
DMU_USERUSED_DNODE(os) != NULL);
}
boolean_t
dmu_objset_userobjused_enabled(objset_t *os)
{
return (dmu_objset_userused_enabled(os) &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}
boolean_t
dmu_objset_projectquota_enabled(objset_t *os)
{
return (file_cbs[os->os_phys->os_type] != NULL &&
DMU_PROJECTUSED_DNODE(os) != NULL &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
}
typedef struct userquota_node {
/* must be the first field, see userquota_update_cache() */
char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
int64_t uqn_delta;
avl_node_t uqn_node;
} userquota_node_t;
typedef struct userquota_cache {
avl_tree_t uqc_user_deltas;
avl_tree_t uqc_group_deltas;
avl_tree_t uqc_project_deltas;
} userquota_cache_t;
static int
userquota_compare(const void *l, const void *r)
{
const userquota_node_t *luqn = l;
const userquota_node_t *ruqn = r;
int rv;
/*
* NB: can only access uqn_id because userquota_update_cache() doesn't
* pass in an entire userquota_node_t.
*/
rv = strcmp(luqn->uqn_id, ruqn->uqn_id);
return (TREE_ISIGN(rv));
}
static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
void *cookie;
userquota_node_t *uqn;
ASSERT(dmu_tx_is_syncing(tx));
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
&cookie)) != NULL) {
/*
* os_userused_lock protects against concurrent calls to
* zap_increment_int(). It's needed because zap_increment_int()
* is not thread-safe (i.e. not atomic).
*/
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_user_deltas);
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_group_deltas);
if (dmu_objset_projectquota_enabled(os)) {
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_project_deltas);
}
}
static void
userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta)
{
userquota_node_t *uqn;
avl_index_t idx;
ASSERT(strlen(id) < sizeof (uqn->uqn_id));
/*
* Use id directly for searching because uqn_id is the first field of
* userquota_node_t and fields after uqn_id won't be accessed in
* avl_find().
*/
uqn = avl_find(avl, (const void *)id, &idx);
if (uqn == NULL) {
uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id));
avl_insert(avl, uqn, idx);
}
uqn->uqn_delta += delta;
}
static void
do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
uint64_t flags, uint64_t user, uint64_t group, uint64_t project,
boolean_t subtract)
{
if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) {
int64_t delta = DNODE_MIN_SIZE + used;
char name[20];
if (subtract)
delta = -delta;
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name), "%llx",
(longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
static void
do_userobjquota_update(objset_t *os, userquota_cache_t *cache, uint64_t flags,
uint64_t user, uint64_t group, uint64_t project, boolean_t subtract)
{
if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) {
char name[20 + DMU_OBJACCT_PREFIX_LEN];
int delta = subtract ? -1 : 1;
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name),
DMU_OBJACCT_PREFIX "%llx", (longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
typedef struct userquota_updates_arg {
objset_t *uua_os;
int uua_sublist_idx;
dmu_tx_t *uua_tx;
} userquota_updates_arg_t;
static void
userquota_updates_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
dmu_tx_t *tx = uua->uua_tx;
dnode_t *dn;
userquota_cache_t cache = { { 0 } };
multilist_sublist_t *list =
multilist_sublist_lock(&os->os_synced_dnodes, uua->uua_sublist_idx);
ASSERT(multilist_sublist_head(list) == NULL ||
dmu_objset_userused_enabled(os));
avl_create(&cache.uqc_user_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
avl_create(&cache.uqc_group_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
if (dmu_objset_projectquota_enabled(os))
avl_create(&cache.uqc_project_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t,
uqn_node));
while ((dn = multilist_sublist_head(list)) != NULL) {
int flags;
ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
dn->dn_phys->dn_flags &
DNODE_FLAG_USERUSED_ACCOUNTED);
flags = dn->dn_id_flags;
ASSERT(flags);
if (flags & DN_ID_OLD_EXIST) {
do_userquota_update(os, &cache, dn->dn_oldused,
dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
do_userobjquota_update(os, &cache, dn->dn_oldflags,
dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
}
if (flags & DN_ID_NEW_EXIST) {
do_userquota_update(os, &cache,
DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags,
dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
do_userobjquota_update(os, &cache,
dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
}
mutex_enter(&dn->dn_mtx);
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
dn->dn_olduid = dn->dn_newuid;
dn->dn_oldgid = dn->dn_newgid;
dn->dn_oldprojid = dn->dn_newprojid;
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (dn->dn_bonuslen == 0)
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
else
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
do_userquota_cacheflush(os, &cache, tx);
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
/*
* Release dnode holds from dmu_objset_sync_dnodes(). When the dnode is being
* synced (i.e. we have issued the zio's for blocks in the dnode), it can't be
* evicted because the block containing the dnode can't be evicted until it is
* written out. However, this hold is necessary to prevent the dnode_t from
* being moved (via dnode_move()) while it's still referenced by
* dbuf_dirty_record_t:dr_dnode. And dr_dnode is needed for
* dirty_lightweight_leaf-type dirty records.
*
* If we are doing user-object accounting, the dnode_rele() happens from
* userquota_updates_task() instead.
*/
static void
dnode_rele_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
multilist_sublist_t *list =
multilist_sublist_lock(&os->os_synced_dnodes, uua->uua_sublist_idx);
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
/*
* Return TRUE if userquota updates are needed.
*/
static boolean_t
dmu_objset_do_userquota_updates_prep(objset_t *os, dmu_tx_t *tx)
{
if (!dmu_objset_userused_enabled(os))
return (B_FALSE);
/*
* If this is a raw receive just return and handle accounting
* later when we have the keys loaded. We also don't do user
* accounting during claiming since the datasets are not owned
* for the duration of claiming and this txg should only be
* used for recovery.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return (B_FALSE);
if (tx->tx_txg <= os->os_spa->spa_claim_max_txg)
return (B_FALSE);
/* Allocate the user/group/project used objects if necessary. */
if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os,
DMU_USERUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
VERIFY0(zap_create_claim(os,
DMU_GROUPUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
if (dmu_objset_projectquota_enabled(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
return (B_TRUE);
}
/*
* Dispatch taskq tasks to dp_sync_taskq to update the user accounting, and
* also release the holds on the dnodes from dmu_objset_sync_dnodes().
* The caller must taskq_wait(dp_sync_taskq).
*/
void
dmu_objset_sync_done(objset_t *os, dmu_tx_t *tx)
{
boolean_t need_userquota = dmu_objset_do_userquota_updates_prep(os, tx);
int num_sublists = multilist_get_num_sublists(&os->os_synced_dnodes);
for (int i = 0; i < num_sublists; i++) {
userquota_updates_arg_t *uua =
kmem_alloc(sizeof (*uua), KM_SLEEP);
uua->uua_os = os;
uua->uua_sublist_idx = i;
uua->uua_tx = tx;
/*
* If we don't need to update userquotas, use
* dnode_rele_task() to call dnode_rele()
*/
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
need_userquota ? userquota_updates_task : dnode_rele_task,
uua, 0);
/* callback frees uua */
}
}
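/*
* A minimal usage sketch (assumption based on the comment above, not part
* of the upstream sources): a caller dispatching these tasks must drain the
* sync taskq before depending on the updated accounting or the released
* dnode holds:
*
* dmu_objset_sync_done(os, tx);
* taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
*/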
/*
* Returns a pointer to data to find uid/gid from
*
* If a dirty record for the transaction group that is syncing can't
* be found then NULL is returned. In the NULL case it is assumed
* the uid/gid aren't changing.
*/
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
void *data;
if (db->db_dirtycnt == 0)
return (db->db.db_data); /* Nothing is changing */
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
if (dr == NULL) {
data = NULL;
} else {
if (dr->dr_dnode->dn_bonuslen == 0 &&
dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
data = dr->dt.dl.dr_data->b_data;
else
data = dr->dt.dl.dr_data;
}
return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
void *data = NULL;
dmu_buf_impl_t *db = NULL;
int flags = dn->dn_id_flags;
int error;
boolean_t have_spill = B_FALSE;
if (!dmu_objset_userused_enabled(dn->dn_objset))
return;
/*
* Raw receives introduce a problem with user accounting. Raw
* receives cannot update the user accounting info because the
* user ids and the sizes are encrypted. To guarantee that we
* never end up with bad user accounting, we simply disable it
* during raw receives. We also disable this for normal receives
* so that an incremental raw receive may be done on top of an
* existing non-raw receive.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return;
if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
DN_ID_CHKED_SPILL)))
return;
if (before && dn->dn_bonuslen != 0)
data = DN_BONUS(dn->dn_phys);
else if (!before && dn->dn_bonuslen != 0) {
if (dn->dn_bonus) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
data = dmu_objset_userquota_find_data(db, tx);
} else {
data = DN_BONUS(dn->dn_phys);
}
} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
int rf = 0;
if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
rf |= DB_RF_HAVESTRUCT;
error = dmu_spill_hold_by_dnode(dn,
rf | DB_RF_MUST_SUCCEED,
FTAG, (dmu_buf_t **)&db);
ASSERT(error == 0);
mutex_enter(&db->db_mtx);
data = (before) ? db->db.db_data :
dmu_objset_userquota_find_data(db, tx);
have_spill = B_TRUE;
} else {
mutex_enter(&dn->dn_mtx);
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
mutex_exit(&dn->dn_mtx);
return;
}
/*
* Must always call the callback in case the object
* type has changed and the new type isn't one we track
*/
zfs_file_info_t zfi;
error = file_cbs[os->os_phys->os_type](dn->dn_bonustype, data, &zfi);
if (before) {
ASSERT(data);
dn->dn_olduid = zfi.zfi_user;
dn->dn_oldgid = zfi.zfi_group;
dn->dn_oldprojid = zfi.zfi_project;
} else if (data) {
dn->dn_newuid = zfi.zfi_user;
dn->dn_newgid = zfi.zfi_group;
dn->dn_newprojid = zfi.zfi_project;
}
/*
* Preserve existing uid/gid when the callback can't determine
* what the new uid/gid are and the callback returned EEXIST.
* The EEXIST error tells us to just use the existing uid/gid.
* If we don't know what the old values are then just assign
* them to 0, since that is a new file being created.
*/
if (!before && data == NULL && error == EEXIST) {
if (flags & DN_ID_OLD_EXIST) {
dn->dn_newuid = dn->dn_olduid;
dn->dn_newgid = dn->dn_oldgid;
dn->dn_newprojid = dn->dn_oldprojid;
} else {
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
}
error = 0;
}
if (db)
mutex_exit(&db->db_mtx);
mutex_enter(&dn->dn_mtx);
if (error == 0 && before)
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (error == 0 && !before)
dn->dn_id_flags |= DN_ID_NEW_EXIST;
if (have_spill) {
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
} else {
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
mutex_exit(&dn->dn_mtx);
if (have_spill)
dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_userobjspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_projectquota_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
}
static int
dmu_objset_space_upgrade(objset_t *os)
{
uint64_t obj;
int err = 0;
/*
* We simply need to mark every object dirty, so that it will be
* synced out and accounted for. If this is called concurrently,
* or if we already did some work before crashing, that's fine,
* since we track each object's accounted state independently.
*/
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
dmu_tx_t *tx;
dmu_buf_t *db;
int objerr;
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_exit)
err = SET_ERROR(EINTR);
mutex_exit(&os->os_upgrade_lock);
if (err != 0)
return (err);
if (issig(JUSTLOOKING) && issig(FORREAL))
return (SET_ERROR(EINTR));
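/*
* Hold the bonus buffer and dirty it in its own tx; this is enough to
* force the object's accounting to be recomputed when the txg syncs.
* Objects we fail to hold are simply skipped.
*/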
objerr = dmu_bonus_hold(os, obj, FTAG, &db);
if (objerr != 0)
continue;
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, obj);
objerr = dmu_tx_assign(tx, TXG_WAIT);
if (objerr != 0) {
dmu_buf_rele(db, FTAG);
dmu_tx_abort(tx);
continue;
}
dmu_buf_will_dirty(db, tx);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
}
return (0);
}
static int
dmu_objset_userspace_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userspace_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_userspace_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_userspace_upgrade_cb);
}
static int
dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userobjspace_present(os) &&
dmu_objset_projectquota_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
if (!dmu_objset_projectquota_enabled(os) &&
dmu_objset_userobjspace_present(os))
return (SET_ERROR(ENOTSUP));
if (dmu_objset_userobjused_enabled(os))
dmu_objset_ds(os)->ds_feature_activation[
SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
if (dmu_objset_projectquota_enabled(os))
dmu_objset_ds(os)->ds_feature_activation[
SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os))
os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
if (dmu_objset_projectquota_enabled(os))
os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_id_quota_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}
boolean_t
dmu_objset_userobjspace_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_userobjused_enabled(os) &&
!dmu_objset_userobjspace_present(os) &&
spa_writeable(dmu_objset_spa(os)));
}
boolean_t
dmu_objset_projectquota_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_projectquota_enabled(os) &&
!dmu_objset_projectquota_present(os) &&
spa_writeable(dmu_objset_spa(os)));
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
usedobjsp, availobjsp);
}
uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}
void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
stat->dds_type = os->os_phys->os_type;
if (os->os_dsl_dataset)
dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
ASSERT(os->os_dsl_dataset ||
os->os_phys->os_type == DMU_OST_META);
if (os->os_dsl_dataset != NULL)
dsl_dataset_stats(os->os_dsl_dataset, nv);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
os->os_phys->os_type);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
dmu_objset_userspace_present(os));
}
int
dmu_objset_is_snapshot(objset_t *os)
{
if (os->os_dsl_dataset != NULL)
return (os->os_dsl_dataset->ds_is_snapshot);
else
return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, const char *name, char *real, int maxlen,
boolean_t *conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
uint64_t ignored;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
MT_NORMALIZE, real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
zap_cursor_t cursor;
zap_attribute_t attr;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
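/*
* *offp is a serialized cursor position; re-initialize the cursor from
* it and hand the advanced position back so the caller can resume the
* iteration on the next call.
*/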
zap_cursor_init_serialized(&cursor,
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
if (idp)
*idp = attr.za_first_integer;
if (case_conflict)
*case_conflict = attr.za_normalization_conflict;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp)
{
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
zap_cursor_t cursor;
zap_attribute_t attr;
/* there is no next dir on a snapshot! */
if (os->os_dsl_dataset->ds_object !=
dsl_dir_phys(dd)->dd_head_dataset_obj)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
dd->dd_pool->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
if (idp)
*idp = attr.za_first_integer;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
typedef struct dmu_objset_find_ctx {
taskq_t *dc_tq;
dsl_pool_t *dc_dp;
uint64_t dc_ddobj;
char *dc_ddname; /* last component of ddobj's name */
int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
void *dc_arg;
int dc_flags;
kmutex_t *dc_error_lock;
int *dc_error;
} dmu_objset_find_ctx_t;
static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
dsl_pool_t *dp = dcp->dc_dp;
dsl_dir_t *dd;
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
uint64_t thisobj;
int err = 0;
/* don't process if there already was an error */
if (*dcp->dc_error != 0)
goto out;
/*
* Note: passing the name (dc_ddname) here is optional, but it
* improves performance because we don't need to call
* zap_value_search() to determine the name.
*/
err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
if (err != 0)
goto out;
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
goto out;
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (dcp->dc_flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
dmu_objset_find_ctx_t *child_dcp =
kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
*child_dcp = *dcp;
child_dcp->dc_ddobj = attr->za_first_integer;
child_dcp->dc_ddname = spa_strdup(attr->za_name);
if (dcp->dc_tq != NULL)
(void) taskq_dispatch(dcp->dc_tq,
dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
else
dmu_objset_find_dp_impl(child_dcp);
}
zap_cursor_fini(&zc);
}
/*
* Iterate over all snapshots.
*/
if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
dsl_dataset_t *ds;
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
err = dsl_dataset_hold_obj(dp,
attr->za_first_integer, FTAG, &ds);
if (err != 0)
break;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
kmem_free(attr, sizeof (zap_attribute_t));
if (err != 0) {
dsl_dir_rele(dd, FTAG);
goto out;
}
/*
* Apply to self.
*/
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
/*
* Note: we hold the dir while calling dsl_dataset_hold_obj() so
* that the dir will remain cached, and we won't have to re-instantiate
* it (which could be expensive due to finding its name via
* zap_value_search()).
*/
dsl_dir_rele(dd, FTAG);
if (err != 0)
goto out;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
out:
if (err != 0) {
mutex_enter(dcp->dc_error_lock);
/* only keep first error */
if (*dcp->dc_error == 0)
*dcp->dc_error = err;
mutex_exit(dcp->dc_error_lock);
}
if (dcp->dc_ddname != NULL)
spa_strfree(dcp->dc_ddname);
kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
dmu_objset_find_ctx_t *dcp = arg;
dsl_pool_t *dp = dcp->dc_dp;
/*
* We need to get a pool_config_lock here, as there are several
* assert(pool_config_held) down the stack. Getting a lock via
* dsl_pool_config_enter is risky, as it might be stalled by a
* pending writer. This would deadlock, as the write lock can
* only be granted when our parent thread gives up the lock.
* The _prio interface gives us priority over a pending writer.
*/
dsl_pool_config_enter_prio(dp, FTAG);
dmu_objset_find_dp_impl(dcp);
dsl_pool_config_exit(dp, FTAG);
}
/*
* Find objsets under and including ddobj, call func(ds) on each.
* The order for the enumeration is completely undefined.
* func is called with dsl_pool_config held.
*/
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
int error = 0;
taskq_t *tq = NULL;
int ntasks;
dmu_objset_find_ctx_t *dcp;
kmutex_t err_lock;
mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
dcp->dc_tq = NULL;
dcp->dc_dp = dp;
dcp->dc_ddobj = ddobj;
dcp->dc_ddname = NULL;
dcp->dc_func = func;
dcp->dc_arg = arg;
dcp->dc_flags = flags;
dcp->dc_error_lock = &err_lock;
dcp->dc_error = &error;
if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
/*
* If a write lock is held we can't make use of parallelism, because
* dsl_pool_config_held is asserted down the stack of the worker
* threads. With a read lock this is solved by having each worker
* thread take its own read lock, which isn't possible with a writer
* lock, so we fall back to the synchronous path here.
* In the future it might be possible to teach dsl_pool_config_held
* to return true for the worker threads, so that a single lock held
* by this thread suffices. For now, stay single threaded.
*/
dmu_objset_find_dp_impl(dcp);
mutex_destroy(&err_lock);
return (error);
}
ntasks = dmu_find_threads;
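/* If dmu_find_threads is 0, scale the task count with the number of leaf vdevs. */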
if (ntasks == 0)
ntasks = vdev_count_leaves(dp->dp_spa) * 4;
tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks,
INT_MAX, 0);
if (tq == NULL) {
kmem_free(dcp, sizeof (*dcp));
mutex_destroy(&err_lock);
return (SET_ERROR(ENOMEM));
}
dcp->dc_tq = tq;
/* dcp will be freed by task */
(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
/*
* PORTING: this code relies on the property of taskq_wait to wait
* until no more tasks are queued and no more tasks are active. As
* we always queue new tasks from within other tasks, taskq_wait
* reliably waits for the full recursion to finish, even though we
* enqueue new tasks after taskq_wait has been called.
* On platforms other than illumos, taskq_wait may not have this
* property.
*/
taskq_wait(tq);
taskq_destroy(tq);
mutex_destroy(&err_lock);
return (error);
}
/*
* Find all objsets under name, and for each, call 'func(child_name, arg)'.
* The dp_config_rwlock must not be held when this is called, and it
* will not be held when the callback is called.
* Therefore this function should only be used when the pool is not changing
* (e.g. in syncing context), or the callback can deal with the possible races.
*/
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
int func(const char *, void *), void *arg, int flags)
{
dsl_dir_t *dd;
dsl_pool_t *dp = spa_get_dsl(spa);
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
char *child;
uint64_t thisobj;
int err;
dsl_pool_config_enter(dp, FTAG);
err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
return (err);
}
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
return (0);
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s/%s", name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = dmu_objset_find_impl(spa, child,
func, arg, flags);
dsl_pool_config_enter(dp, FTAG);
kmem_strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
if (err != 0) {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
return (err);
}
}
/*
* Iterate over all snapshots.
*/
if (flags & DS_FIND_SNAPSHOTS) {
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s@%s",
name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = func(child, arg);
dsl_pool_config_enter(dp, FTAG);
kmem_strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
dsl_dir_rele(dd, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
dsl_pool_config_exit(dp, FTAG);
if (err != 0)
return (err);
/* Apply to self. */
return (func(name, arg));
}
/*
* See comment above dmu_objset_find_impl().
*/
int
dmu_objset_find(const char *name, int func(const char *, void *), void *arg,
int flags)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, FTAG);
if (error != 0)
return (error);
error = dmu_objset_find_impl(spa, name, func, arg, flags);
spa_close(spa, FTAG);
return (error);
}
boolean_t
dmu_objset_incompatible_encryption_version(objset_t *os)
{
return (dsl_dir_incompatible_encryption_version(
os->os_dsl_dataset->ds_dir));
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
os->os_user_ptr = user_ptr;
}
void *
dmu_objset_get_user(objset_t *os)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
return (os->os_user_ptr);
}
/*
* Determine the name of the filesystem, given the name of a snapshot.
* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes.
*/
int
dmu_fsname(const char *snapname, char *buf)
{
char *atp = strchr(snapname, '@');
if (atp == NULL)
return (SET_ERROR(EINVAL));
if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
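/*
* strlcpy's size argument includes the terminating NUL, so copying
* (atp - snapname + 1) bytes yields exactly the filesystem portion of
* the snapshot name.
*/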
(void) strlcpy(buf, snapname, atp - snapname + 1);
return (0);
}
/*
* Call when we think we're going to write/free space in open context
* to track the amount of dirty data in the open txg, which is also the
* amount of memory that can not be evicted until this txg syncs.
*
* Note that there are two conditions where this can be called from
* syncing context:
*
* [1] When we just created the dataset, in which case we go on with
* updating any accounting of dirty data as usual.
* [2] When we are dirtying MOS data, in which case we only update the
* pool's accounting of dirty data.
*/
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);
if (ds != NULL) {
dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
}
dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_hold_flags);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_rele_flags);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);
EXPORT_SYMBOL(dmu_objset_dnodesize);
EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl_dnstats);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_sync_done);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
EXPORT_SYMBOL(dmu_objset_projectquota_present);
EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dmu_redact.c b/sys/contrib/openzfs/module/zfs/dmu_redact.c
index 09ca7d509ce7..7afcc1231348 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_redact.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_redact.c
@@ -1,1202 +1,1202 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, 2018 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/txg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_redact.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/dmu_tx.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#endif
/*
* This controls the number of entries in the buffer the redaction_list_update
* synctask uses to buffer writes to the redaction list.
*/
static const int redact_sync_bufsize = 1024;
/*
* Controls how often to update the redaction list while it is being
* created.
*/
static const uint64_t redaction_list_update_interval_ns =
1000 * 1000 * 1000ULL; /* 1s */
/*
* This tunable controls the length of the queues that zfs redact worker threads
* use to communicate. If the dmu_redact_snap thread is blocking on these
* queues, this variable may need to be increased. If there is a significant
* slowdown at the start of a redact operation as these threads consume all the
* available IO resources, or the queues are consuming too much memory, this
* variable may need to be decreased.
*/
static const int zfs_redact_queue_length = 1024 * 1024;
/*
* This tunable controls the fill fraction of the queues used by zfs redact.
* The fill fraction controls how often threads have to be cv_signaled.
* If a lot of cpu time is being spent on cv_signal, then it should be tuned
* down. If the queues empty before the signaled thread can catch up, then
* it should be tuned up.
*/
static const uint64_t zfs_redact_queue_ff = 20;
struct redact_record {
bqueue_node_t ln;
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t start_object;
uint64_t start_blkid;
uint64_t end_object;
uint64_t end_blkid;
uint8_t indblkshift;
uint32_t datablksz;
};
struct redact_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
dsl_dataset_t *ds; /* Dataset to traverse */
struct redact_record *current_record;
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
objlist_t *deleted_objs;
uint64_t *num_blocks_visited;
uint64_t ignore_object; /* ignore further callbacks on this */
uint64_t txg; /* txg to traverse since */
};
/*
* The redaction node is a wrapper around the redaction record that is used
* by the redaction merging thread to sort the records and determine overlaps.
*
* It contains two nodes; one sorts the records by their start_zb, and the other
* sorts the records by their end_zb.
*/
struct redact_node {
avl_node_t avl_node_start;
avl_node_t avl_node_end;
struct redact_record *record;
struct redact_thread_arg *rt_arg;
uint32_t thread_num;
};
struct merge_data {
list_t md_redact_block_pending;
redact_block_phys_t md_coalesce_block;
uint64_t md_last_time;
redact_block_phys_t md_furthest[TXG_SIZE];
/* Lists of struct redact_block_list_node. */
list_t md_blocks[TXG_SIZE];
boolean_t md_synctask_txg[TXG_SIZE];
uint64_t md_latest_synctask_txg;
redaction_list_t *md_redaction_list;
};
/*
* A wrapper around struct redact_block so it can be stored in a list_t.
*/
struct redact_block_list_node {
redact_block_phys_t block;
list_node_t node;
};
/*
* We've found a new redaction candidate. In order to improve performance, we
* coalesce these blocks when they're adjacent to each other. This function
* handles that. If the new candidate block range is immediately after the
* range we're building, coalesce it into the range we're building. Otherwise,
* put the record we're building on the queue, and update the build pointer to
* point to the new record.
*/
static void
record_merge_enqueue(bqueue_t *q, struct redact_record **build,
struct redact_record *new)
{
if (new->eos_marker) {
if (*build != NULL)
- bqueue_enqueue(q, *build, sizeof (*build));
+ bqueue_enqueue(q, *build, sizeof (**build));
bqueue_enqueue_flush(q, new, sizeof (*new));
return;
}
if (*build == NULL) {
*build = new;
return;
}
struct redact_record *curbuild = *build;
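/*
* Two ranges are adjacent if the new one starts at the block right
* after the range being built within the same object, or at block 0 of
* the next object when the built range runs to the end of its object.
*/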
if ((curbuild->end_object == new->start_object &&
curbuild->end_blkid + 1 == new->start_blkid &&
curbuild->end_blkid != UINT64_MAX) ||
(curbuild->end_object + 1 == new->start_object &&
curbuild->end_blkid == UINT64_MAX && new->start_blkid == 0)) {
curbuild->end_object = new->end_object;
curbuild->end_blkid = new->end_blkid;
kmem_free(new, sizeof (*new));
} else {
bqueue_enqueue(q, curbuild, sizeof (*curbuild));
*build = new;
}
}
#ifdef _KERNEL
struct objnode {
avl_node_t node;
uint64_t obj;
};
static int
objnode_compare(const void *o1, const void *o2)
{
const struct objnode *obj1 = o1;
const struct objnode *obj2 = o2;
if (obj1->obj < obj2->obj)
return (-1);
if (obj1->obj > obj2->obj)
return (1);
return (0);
}
static objlist_t *
zfs_get_deleteq(objset_t *os)
{
objlist_t *deleteq_objlist = objlist_create();
uint64_t deleteq_obj;
zap_cursor_t zc;
zap_attribute_t za;
dmu_object_info_t doi;
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
VERIFY0(dmu_object_info(os, MASTER_NODE_OBJ, &doi));
ASSERT3U(doi.doi_type, ==, DMU_OT_MASTER_NODE);
VERIFY0(zap_lookup(os, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
/*
* In order to insert objects into the objlist, they must be in sorted
* order. We don't know what order we'll get them out of the ZAP in, so
* we insert them into and remove them from an avl_tree_t to sort them.
*/
avl_tree_t at;
avl_create(&at, objnode_compare, sizeof (struct objnode),
offsetof(struct objnode, node));
for (zap_cursor_init(&zc, os, deleteq_obj);
zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
struct objnode *obj = kmem_zalloc(sizeof (*obj), KM_SLEEP);
obj->obj = za.za_first_integer;
avl_add(&at, obj);
}
zap_cursor_fini(&zc);
struct objnode *next, *found = avl_first(&at);
while (found != NULL) {
next = AVL_NEXT(&at, found);
objlist_insert(deleteq_objlist, found->obj);
found = next;
}
void *cookie = NULL;
while ((found = avl_destroy_nodes(&at, &cookie)) != NULL)
kmem_free(found, sizeof (*found));
avl_destroy(&at);
return (deleteq_objlist);
}
#endif
/*
* This is the callback function to traverse_dataset for the redaction threads
* for dmu_redact_snap. This thread is responsible for creating redaction
* records for all the data that is modified by the snapshots we're redacting
* with respect to. Redaction records represent ranges of data that have been
* modified by one of the redaction snapshots, and are stored in the
* redact_record struct. We need to create redaction records for three
* cases:
*
* First, if there's a normal write, we need to create a redaction record for
* that block.
*
* Second, if there's a hole, we need to create a redaction record that covers
* the whole range of the hole. If the hole is in the meta-dnode, it must cover
* every block in all of the objects in the hole.
*
* Third, if there is a deleted object, we need to create a redaction record for
* all of the blocks in that object.
*/
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
(void) spa, (void) zilog;
struct redact_thread_arg *rta = arg;
struct redact_record *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= rta->resume.zb_object);
if (rta->cancel)
return (SET_ERROR(EINTR));
if (rta->ignore_object == zb->zb_object)
return (0);
/*
* If we're visiting a dnode, we need to handle the case where the
* object has been deleted.
*/
if (zb->zb_level == ZB_DNODE_LEVEL) {
ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
if (zb->zb_object == 0)
return (0);
/*
* If the object has been deleted, redact all of the blocks in
* it.
*/
if (dnp->dn_type == DMU_OT_NONE ||
objlist_exists(rta->deleted_objs, zb->zb_object)) {
rta->ignore_object = zb->zb_object;
record = kmem_zalloc(sizeof (struct redact_record),
KM_SLEEP);
record->eos_marker = B_FALSE;
record->start_object = record->end_object =
zb->zb_object;
record->start_blkid = 0;
record->end_blkid = UINT64_MAX;
record_merge_enqueue(&rta->q,
&rta->current_record, record);
}
return (0);
} else if (zb->zb_level < 0) {
return (0);
} else if (zb->zb_level > 0 && !BP_IS_HOLE(bp)) {
/*
* If this is an indirect block, but not a hole, it doesn't
* provide any useful information for redaction, so ignore it.
*/
return (0);
}
/*
* At this point, there are two options left for the type of block we're
* looking at. Either this is a hole (which could be in the dnode or
* the meta-dnode), or it's a level 0 block of some sort. If it's a
* hole, we create a redaction record that covers the whole range. If
* the hole is in a dnode, we need to redact all the blocks in that
* hole. If the hole is in the meta-dnode, we instead need to redact
* all blocks in every object covered by that hole. If it's a level 0
* block, we only need to redact that single block.
*/
record = kmem_zalloc(sizeof (struct redact_record), KM_SLEEP);
record->eos_marker = B_FALSE;
record->start_object = record->end_object = zb->zb_object;
if (BP_IS_HOLE(bp)) {
record->start_blkid = zb->zb_blkid *
bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
record->end_blkid = ((zb->zb_blkid + 1) *
bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level)) - 1;
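/*
* A hole in the meta-dnode covers a range of dnodes, so convert the
* meta-dnode block ids into a range of object numbers (each block
* holds datablksz / sizeof (dnode_phys_t) dnodes).
*/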
if (zb->zb_object == DMU_META_DNODE_OBJECT) {
record->start_object = record->start_blkid *
((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
sizeof (dnode_phys_t));
record->start_blkid = 0;
record->end_object = ((record->end_blkid +
1) * ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
sizeof (dnode_phys_t))) - 1;
record->end_blkid = UINT64_MAX;
}
} else if (zb->zb_level != 0 ||
zb->zb_object == DMU_META_DNODE_OBJECT) {
kmem_free(record, sizeof (*record));
return (0);
} else {
record->start_blkid = record->end_blkid = zb->zb_blkid;
}
record->indblkshift = dnp->dn_indblkshift;
record->datablksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
record_merge_enqueue(&rta->q, &rta->current_record, record);
return (0);
}
static __attribute__((noreturn)) void
redact_traverse_thread(void *arg)
{
struct redact_thread_arg *rt_arg = arg;
int err;
struct redact_record *data;
#ifdef _KERNEL
if (rt_arg->os->os_phys->os_type == DMU_OST_ZFS)
rt_arg->deleted_objs = zfs_get_deleteq(rt_arg->os);
else
rt_arg->deleted_objs = objlist_create();
#else
rt_arg->deleted_objs = objlist_create();
#endif
err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg,
&rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
redact_cb, rt_arg);
if (err != EINTR)
rt_arg->error_code = err;
objlist_destroy(rt_arg->deleted_objs);
data = kmem_zalloc(sizeof (*data), KM_SLEEP);
data->eos_marker = B_TRUE;
record_merge_enqueue(&rt_arg->q, &rt_arg->current_record, data);
thread_exit();
}
static inline void
create_zbookmark_from_obj_off(zbookmark_phys_t *zb, uint64_t object,
uint64_t blkid)
{
zb->zb_object = object;
zb->zb_level = 0;
zb->zb_blkid = blkid;
}
/*
* This is a utility function that can do the comparison for the start or ends
* of the ranges in a redact_record.
*/
static int
redact_range_compare(uint64_t obj1, uint64_t off1, uint32_t dbss1,
uint64_t obj2, uint64_t off2, uint32_t dbss2)
{
zbookmark_phys_t z1, z2;
create_zbookmark_from_obj_off(&z1, obj1, off1);
create_zbookmark_from_obj_off(&z2, obj2, off2);
return (zbookmark_compare(dbss1 >> SPA_MINBLOCKSHIFT, 0,
dbss2 >> SPA_MINBLOCKSHIFT, 0, &z1, &z2));
}
/*
* Compare two redaction records by their range's start location. Also makes
* eos records always compare last. We use the thread number in the redact_node
* to ensure that records do not compare equal (which is not allowed in our avl
* trees).
*/
static int
redact_node_compare_start(const void *arg1, const void *arg2)
{
const struct redact_node *rn1 = arg1;
const struct redact_node *rn2 = arg2;
const struct redact_record *rr1 = rn1->record;
const struct redact_record *rr2 = rn2->record;
if (rr1->eos_marker)
return (1);
if (rr2->eos_marker)
return (-1);
int cmp = redact_range_compare(rr1->start_object, rr1->start_blkid,
rr1->datablksz, rr2->start_object, rr2->start_blkid,
rr2->datablksz);
if (cmp == 0)
cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
return (cmp);
}
/*
* Compare two redaction records by their range's end location. Also makes
* eos records always compare last. We use the thread number in the redact_node
* to ensure that records do not compare equal (which is not allowed in our avl
* trees).
*/
static int
redact_node_compare_end(const void *arg1, const void *arg2)
{
const struct redact_node *rn1 = arg1;
const struct redact_node *rn2 = arg2;
const struct redact_record *srr1 = rn1->record;
const struct redact_record *srr2 = rn2->record;
if (srr1->eos_marker)
return (1);
if (srr2->eos_marker)
return (-1);
int cmp = redact_range_compare(srr1->end_object, srr1->end_blkid,
srr1->datablksz, srr2->end_object, srr2->end_blkid,
srr2->datablksz);
if (cmp == 0)
cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
return (cmp);
}
/*
* Utility function that compares two redaction records to determine if any part
* of the "from" record is before any part of the "to" record. Also causes End
* of Stream redaction records to compare after all others, so that the
* redaction merging logic can stay simple.
*/
static boolean_t
redact_record_before(const struct redact_record *from,
const struct redact_record *to)
{
if (from->eos_marker == B_TRUE)
return (B_FALSE);
else if (to->eos_marker == B_TRUE)
return (B_TRUE);
return (redact_range_compare(from->start_object, from->start_blkid,
from->datablksz, to->end_object, to->end_blkid,
to->datablksz) <= 0);
}
/*
* Pop a new redaction record off the queue, check that the records are in the
* right order, and free the old data.
*/
static struct redact_record *
get_next_redact_record(bqueue_t *bq, struct redact_record *prev)
{
struct redact_record *next = bqueue_dequeue(bq);
ASSERT(redact_record_before(prev, next));
kmem_free(prev, sizeof (*prev));
return (next);
}
/*
* Remove the given redaction node from both trees, pull a new redaction record
* off the queue, free the old redaction record, update the redaction node, and
* reinsert the node into the trees.
*/
static int
update_avl_trees(avl_tree_t *start_tree, avl_tree_t *end_tree,
struct redact_node *redact_node)
{
avl_remove(start_tree, redact_node);
avl_remove(end_tree, redact_node);
redact_node->record = get_next_redact_record(&redact_node->rt_arg->q,
redact_node->record);
avl_add(end_tree, redact_node);
avl_add(start_tree, redact_node);
return (redact_node->rt_arg->error_code);
}
/*
* Synctask for updating redaction lists. We first take this txg's list of
* redacted blocks and append those to the redaction list. We then update the
* redaction list's bonus buffer. We store the furthest blocks we visited and
* the list of snapshots that we're redacting with respect to. We need these so
* that redacted sends and receives can be correctly resumed.
*/
static void
redaction_list_update_sync(void *arg, dmu_tx_t *tx)
{
struct merge_data *md = arg;
uint64_t txg = dmu_tx_get_txg(tx);
list_t *list = &md->md_blocks[txg & TXG_MASK];
redact_block_phys_t *furthest_visited =
&md->md_furthest[txg & TXG_MASK];
objset_t *mos = tx->tx_pool->dp_meta_objset;
redaction_list_t *rl = md->md_redaction_list;
int bufsize = redact_sync_bufsize;
redact_block_phys_t *buf = kmem_alloc(bufsize * sizeof (*buf),
KM_SLEEP);
int index = 0;
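/*
* Drain this txg's list of redacted blocks, appending them to the
* redaction list object in chunks of redact_sync_bufsize entries.
*/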
dmu_buf_will_dirty(rl->rl_dbuf, tx);
for (struct redact_block_list_node *rbln = list_remove_head(list);
rbln != NULL; rbln = list_remove_head(list)) {
ASSERT3U(rbln->block.rbp_object, <=,
furthest_visited->rbp_object);
ASSERT(rbln->block.rbp_object < furthest_visited->rbp_object ||
rbln->block.rbp_blkid <= furthest_visited->rbp_blkid);
buf[index] = rbln->block;
index++;
if (index == bufsize) {
dmu_write(mos, rl->rl_object,
rl->rl_phys->rlp_num_entries * sizeof (*buf),
bufsize * sizeof (*buf), buf, tx);
rl->rl_phys->rlp_num_entries += bufsize;
index = 0;
}
kmem_free(rbln, sizeof (*rbln));
}
if (index > 0) {
dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
sizeof (*buf), index * sizeof (*buf), buf, tx);
rl->rl_phys->rlp_num_entries += index;
}
kmem_free(buf, bufsize * sizeof (*buf));
md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;
rl->rl_phys->rlp_last_object = furthest_visited->rbp_object;
rl->rl_phys->rlp_last_blkid = furthest_visited->rbp_blkid;
}
static void
commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
uint64_t blkid)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
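/*
* Register at most one redaction_list_update_sync synctask per txg; it
* will flush the blocks queued on md_blocks[] for that txg.
*/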
if (!md->md_synctask_txg[txg & TXG_MASK]) {
dsl_sync_task_nowait(dmu_tx_pool(tx),
redaction_list_update_sync, md, tx);
md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;
md->md_latest_synctask_txg = txg;
}
md->md_furthest[txg & TXG_MASK].rbp_object = object;
md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;
list_move_tail(&md->md_blocks[txg & TXG_MASK],
&md->md_redact_block_pending);
dmu_tx_commit(tx);
md->md_last_time = gethrtime();
}
/*
* We want to store the list of blocks that we're redacting in the bookmark's
* redaction list. However, this list is stored in the MOS, which means it can
* only be written to in syncing context. To get around this, we create a
* synctask that will write to the MOS for us. We tell it what to write by
* keeping a linked list for each transaction group; every time we decide to
* redact a block, we append it to the transaction group that is currently in
* open context. We also update some progress information that the synctask
* will store to enable resumable redacted sends.
*/
static void
update_redaction_list(struct merge_data *md, objset_t *os,
uint64_t object, uint64_t blkid, uint64_t endblkid, uint32_t blksz)
{
boolean_t enqueue = B_FALSE;
redact_block_phys_t cur = {0};
uint64_t count = endblkid - blkid + 1;
while (count > REDACT_BLOCK_MAX_COUNT) {
update_redaction_list(md, os, object, blkid,
blkid + REDACT_BLOCK_MAX_COUNT - 1, blksz);
blkid += REDACT_BLOCK_MAX_COUNT;
count -= REDACT_BLOCK_MAX_COUNT;
}
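/*
* Try to extend the block currently being coalesced. If the new range
* isn't contiguous with it (or would overflow the per-entry count),
* queue the old block on the pending list and start a new one.
*/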
redact_block_phys_t *coalesce = &md->md_coalesce_block;
boolean_t new;
if (coalesce->rbp_size_count == 0) {
new = B_TRUE;
enqueue = B_FALSE;
} else {
uint64_t old_count = redact_block_get_count(coalesce);
if (coalesce->rbp_object == object &&
coalesce->rbp_blkid + old_count == blkid &&
old_count + count <= REDACT_BLOCK_MAX_COUNT) {
ASSERT3U(redact_block_get_size(coalesce), ==, blksz);
redact_block_set_count(coalesce, old_count + count);
new = B_FALSE;
enqueue = B_FALSE;
} else {
new = B_TRUE;
enqueue = B_TRUE;
}
}
if (new) {
cur = *coalesce;
coalesce->rbp_blkid = blkid;
coalesce->rbp_object = object;
redact_block_set_count(coalesce, count);
redact_block_set_size(coalesce, blksz);
}
if (enqueue && redact_block_get_size(&cur) != 0) {
struct redact_block_list_node *rbln =
kmem_alloc(sizeof (struct redact_block_list_node),
KM_SLEEP);
rbln->block = cur;
list_insert_tail(&md->md_redact_block_pending, rbln);
}
if (gethrtime() > md->md_last_time +
redaction_list_update_interval_ns) {
commit_rl_updates(os, md, object, blkid);
}
}
/*
* This thread merges all the redaction records provided by the worker threads,
* and determines which blocks are redacted by all the snapshots. The algorithm
* for doing so is similar to performing a merge in mergesort with n sub-lists
* instead of 2, with some added complexity due to the fact that the entries are
* ranges, not just single blocks. This algorithm relies on the fact that the
* queues are sorted, which is ensured by the fact that traverse_dataset
* traverses the dataset in a consistent order. We pull one entry off the front
* of the queues of each secure dataset traversal thread. Then we repeat the
* following: each record represents a range of blocks modified by one of the
* redaction snapshots, and each block in that range may need to be redacted in
* the send stream. Find the record with the latest start of its range, and the
* record with the earliest end of its range. If the last start is before the
* first end, then we know that the blocks in the range [last_start, first_end]
* are covered by all of the ranges at the front of the queues, which means
* every thread redacts that whole range. For example, let's say the ranges on
* each queue look like this:
*
* Block Id 1 2 3 4 5 6 7 8 9 10 11
* Thread 1 | [====================]
* Thread 2 | [========]
* Thread 3 | [=================]
*
* Thread 3 has the last start (5), and Thread 2 has the first end (6). All
* three threads modified the range [5,6], so that data should not be sent over
* the wire. After we've determined whether or not to redact anything, we take
* the record with the first end. We discard that record, and pull a new one
* off the front of the queue it came from. In the above example, we would
* discard Thread 2's record, and pull a new one. Let's say the next record we
* pulled from Thread 2 covered range [10,11]. The new layout would look like
* this:
*
* Block Id 1 2 3 4 5 6 7 8 9 10 11
* Thread 1 | [====================]
* Thread 2 | [==]
* Thread 3 | [=================]
*
* When we compare the last start (10, from Thread 2) and the first end (9, from
* Thread 1), we see that the last start is greater than the first end.
* Therefore, we do not redact anything from these records. We'll iterate by
* replacing the record from Thread 1.
*
* We iterate by replacing the record with the lowest end because we know
* that the record with the lowest end has helped us as much as it can. All the
* ranges before it that we will ever redact have been redacted. In addition,
* by replacing the one with the lowest end, we guarantee we catch all ranges
* that need to be redacted. For example, if in the case above we had replaced
* the record from Thread 1 instead, we might have ended up with the following:
*
* Block Id 1 2 3 4 5 6 7 8 9 10 11 12
* Thread 1 | [==]
* Thread 2 | [========]
* Thread 3 | [=================]
*
* If the next record from Thread 2 had been [8,10], for example, we should have
* redacted part of that range, but because we updated Thread 1's record, we
* missed it.
*
* We implement this algorithm by using two trees. The first sorts the
* redaction records by their start_zb, and the second sorts them by their
* end_zb. We use these to find the record with the last start and the record
* with the first end. We create a record with that start and end, and send it
* on. The overall runtime of this implementation is O(n log m), where n is the
* total number of redaction records from all the different redaction snapshots,
* and m is the number of redaction snapshots.
*
* If we redact with respect to zero snapshots, we create a redaction
* record with the start object and blkid set to 0, and the end object and
* blkid set to UINT64_MAX. This results in every block being redacted.
*/
static int
perform_thread_merge(bqueue_t *q, uint32_t num_threads,
struct redact_thread_arg *thread_args, boolean_t *cancel)
{
struct redact_node *redact_nodes = NULL;
avl_tree_t start_tree, end_tree;
struct redact_record *record;
struct redact_record *current_record = NULL;
int err = 0;
struct merge_data md = { {0} };
list_create(&md.md_redact_block_pending,
sizeof (struct redact_block_list_node),
offsetof(struct redact_block_list_node, node));
/*
* If we're redacting with respect to zero snapshots, then no data is
* permitted to be sent. We enqueue a record that redacts all blocks,
* and an eos marker.
*/
if (num_threads == 0) {
record = kmem_zalloc(sizeof (struct redact_record),
KM_SLEEP);
/* We can't redact object 0, so don't try. */
record->start_object = 1;
record->start_blkid = 0;
record->end_object = record->end_blkid = UINT64_MAX;
bqueue_enqueue(q, record, sizeof (*record));
return (0);
}
if (num_threads > 0) {
redact_nodes = kmem_zalloc(num_threads *
sizeof (*redact_nodes), KM_SLEEP);
}
avl_create(&start_tree, redact_node_compare_start,
sizeof (struct redact_node),
offsetof(struct redact_node, avl_node_start));
avl_create(&end_tree, redact_node_compare_end,
sizeof (struct redact_node),
offsetof(struct redact_node, avl_node_end));
for (int i = 0; i < num_threads; i++) {
struct redact_node *node = &redact_nodes[i];
struct redact_thread_arg *targ = &thread_args[i];
node->record = bqueue_dequeue(&targ->q);
node->rt_arg = targ;
node->thread_num = i;
avl_add(&start_tree, node);
avl_add(&end_tree, node);
}
/*
* Once the first record in the end tree has returned EOS, every record
* must be an EOS record, so we should stop.
*/
while (err == 0 && !((struct redact_node *)avl_first(&end_tree))->
record->eos_marker) {
if (*cancel) {
err = EINTR;
break;
}
struct redact_node *last_start = avl_last(&start_tree);
struct redact_node *first_end = avl_first(&end_tree);
/*
* If the last start record is before the first end record,
* then we have blocks that are redacted by all threads.
* Therefore, we should redact them. Copy the record, and send
* it to the main thread.
*/
if (redact_record_before(last_start->record,
first_end->record)) {
record = kmem_zalloc(sizeof (struct redact_record),
KM_SLEEP);
*record = *first_end->record;
record->start_object = last_start->record->start_object;
record->start_blkid = last_start->record->start_blkid;
record_merge_enqueue(q, &current_record,
record);
}
err = update_avl_trees(&start_tree, &end_tree, first_end);
}
/*
* We're done; if we were cancelled, we need to cancel our workers and
* clear out their queues. Either way, we need to remove every thread's
* redact_node struct from the avl trees.
*/
for (int i = 0; i < num_threads; i++) {
if (err != 0) {
thread_args[i].cancel = B_TRUE;
while (!redact_nodes[i].record->eos_marker) {
(void) update_avl_trees(&start_tree, &end_tree,
&redact_nodes[i]);
}
}
avl_remove(&start_tree, &redact_nodes[i]);
avl_remove(&end_tree, &redact_nodes[i]);
kmem_free(redact_nodes[i].record,
sizeof (struct redact_record));
bqueue_destroy(&thread_args[i].q);
}
avl_destroy(&start_tree);
avl_destroy(&end_tree);
kmem_free(redact_nodes, num_threads * sizeof (*redact_nodes));
if (current_record != NULL)
- bqueue_enqueue(q, current_record, sizeof (current_record));
+ bqueue_enqueue(q, current_record, sizeof (*current_record));
return (err);
}
struct redact_merge_thread_arg {
bqueue_t q;
spa_t *spa;
int numsnaps;
struct redact_thread_arg *thr_args;
boolean_t cancel;
int error_code;
};
static __attribute__((noreturn)) void
redact_merge_thread(void *arg)
{
struct redact_merge_thread_arg *rmta = arg;
rmta->error_code = perform_thread_merge(&rmta->q,
rmta->numsnaps, rmta->thr_args, &rmta->cancel);
struct redact_record *rec = kmem_zalloc(sizeof (*rec), KM_SLEEP);
rec->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rmta->q, rec, 1);
thread_exit();
}
/*
* Find the next object in or after the redaction range passed in, and hold
* its dnode with the provided tag. Also update *object to contain the new
* object number.
*/
static int
hold_next_object(objset_t *os, struct redact_record *rec, const void *tag,
uint64_t *object, dnode_t **dn)
{
int err = 0;
if (*dn != NULL)
dnode_rele(*dn, tag);
*dn = NULL;
if (*object < rec->start_object) {
*object = rec->start_object - 1;
}
err = dmu_object_next(os, object, B_FALSE, 0);
if (err != 0)
return (err);
err = dnode_hold(os, *object, tag, dn);
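/*
* Skip objects that fall before the record's range as well as any
* metadata objects; dmu_object_next() returns ESRCH when there are no
* more objects to visit.
*/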
while (err == 0 && (*object < rec->start_object ||
DMU_OT_IS_METADATA((*dn)->dn_type))) {
dnode_rele(*dn, tag);
*dn = NULL;
err = dmu_object_next(os, object, B_FALSE, 0);
if (err != 0)
break;
err = dnode_hold(os, *object, tag, dn);
}
return (err);
}
static int
perform_redaction(objset_t *os, redaction_list_t *rl,
struct redact_merge_thread_arg *rmta)
{
int err = 0;
bqueue_t *q = &rmta->q;
struct redact_record *rec = NULL;
struct merge_data md = { {0} };
list_create(&md.md_redact_block_pending,
sizeof (struct redact_block_list_node),
offsetof(struct redact_block_list_node, node));
md.md_redaction_list = rl;
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&md.md_blocks[i],
sizeof (struct redact_block_list_node),
offsetof(struct redact_block_list_node, node));
}
dnode_t *dn = NULL;
uint64_t prev_obj = 0;
for (rec = bqueue_dequeue(q); !rec->eos_marker && err == 0;
rec = get_next_redact_record(q, rec)) {
ASSERT3U(rec->start_object, !=, 0);
uint64_t object;
if (prev_obj != rec->start_object) {
object = rec->start_object - 1;
err = hold_next_object(os, rec, FTAG, &object, &dn);
} else {
object = prev_obj;
}
while (err == 0 && object <= rec->end_object) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
err = EINTR;
break;
}
/*
* Part of the current object is contained somewhere in
* the range covered by rec.
*/
uint64_t startblkid;
uint64_t endblkid;
uint64_t maxblkid = dn->dn_phys->dn_maxblkid;
if (rec->start_object < object)
startblkid = 0;
else if (rec->start_blkid > maxblkid)
break;
else
startblkid = rec->start_blkid;
if (rec->end_object > object || rec->end_blkid >
maxblkid) {
endblkid = maxblkid;
} else {
endblkid = rec->end_blkid;
}
update_redaction_list(&md, os, object, startblkid,
endblkid, dn->dn_datablksz);
if (object == rec->end_object)
break;
err = hold_next_object(os, rec, FTAG, &object, &dn);
}
if (err == ESRCH)
err = 0;
if (dn != NULL)
prev_obj = object;
}
if (err == 0 && dn != NULL)
dnode_rele(dn, FTAG);
if (err == ESRCH)
err = 0;
rmta->cancel = B_TRUE;
while (!rec->eos_marker)
rec = get_next_redact_record(q, rec);
kmem_free(rec, sizeof (*rec));
/*
* There may be a block that's still being coalesced; sync that out
* before we return.
*/
if (err == 0 && md.md_coalesce_block.rbp_size_count != 0) {
struct redact_block_list_node *rbln =
kmem_alloc(sizeof (struct redact_block_list_node),
KM_SLEEP);
rbln->block = md.md_coalesce_block;
list_insert_tail(&md.md_redact_block_pending, rbln);
}
commit_rl_updates(os, &md, UINT64_MAX, UINT64_MAX);
/*
* Wait for all the redaction info to sync out before we return, so that
* anyone who attempts to resume this redaction will have all the data
* they need.
*/
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
if (md.md_latest_synctask_txg != 0)
txg_wait_synced(dp, md.md_latest_synctask_txg);
for (int i = 0; i < TXG_SIZE; i++)
list_destroy(&md.md_blocks[i]);
return (err);
}
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
int
dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
const char *redactbook)
{
int err = 0;
dsl_pool_t *dp = NULL;
dsl_dataset_t *ds = NULL;
int numsnaps = 0;
objset_t *os;
struct redact_thread_arg *args = NULL;
redaction_list_t *new_rl = NULL;
char *newredactbook;
if ((err = dsl_pool_hold(snapname, FTAG, &dp)) != 0)
return (err);
newredactbook = kmem_zalloc(sizeof (char) * ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
if ((err = dsl_dataset_hold_flags(dp, snapname, DS_HOLD_FLAG_DECRYPT,
FTAG, &ds)) != 0) {
goto out;
}
dsl_dataset_long_hold(ds, FTAG);
if (!ds->ds_is_snapshot || dmu_objset_from_ds(ds, &os) != 0) {
err = EINVAL;
goto out;
}
if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_REDACTED_DATASETS)) {
err = EALREADY;
goto out;
}
numsnaps = fnvlist_num_pairs(redactnvl);
if (numsnaps > 0)
args = kmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);
nvpair_t *pair = NULL;
for (int i = 0; i < numsnaps; i++) {
pair = nvlist_next_nvpair(redactnvl, pair);
const char *name = nvpair_name(pair);
struct redact_thread_arg *rta = &args[i];
err = dsl_dataset_hold_flags(dp, name, DS_HOLD_FLAG_DECRYPT,
FTAG, &rta->ds);
if (err != 0)
break;
/*
* We want to do the long hold before we can get any other
* errors, because the cleanup code will release the long
* hold if rta->ds is filled in.
*/
dsl_dataset_long_hold(rta->ds, FTAG);
err = dmu_objset_from_ds(rta->ds, &rta->os);
if (err != 0)
break;
if (!dsl_dataset_is_before(rta->ds, ds, 0)) {
err = EINVAL;
break;
}
if (dsl_dataset_feature_is_active(rta->ds,
SPA_FEATURE_REDACTED_DATASETS)) {
err = EALREADY;
break;
}
}
if (err != 0)
goto out;
VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);
boolean_t resuming = B_FALSE;
zfs_bookmark_phys_t bookmark;
(void) strlcpy(newredactbook, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(newredactbook, '@');
ASSERT3P(c, !=, NULL);
int n = snprintf(c, ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook),
"#%s", redactbook);
if (n >= ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook)) {
dsl_pool_rele(dp, FTAG);
kmem_free(newredactbook,
sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);
if (args != NULL)
kmem_free(args, numsnaps * sizeof (*args));
return (SET_ERROR(ENAMETOOLONG));
}
err = dsl_bookmark_lookup(dp, newredactbook, NULL, &bookmark);
if (err == 0) {
resuming = B_TRUE;
if (bookmark.zbm_redaction_obj == 0) {
err = EEXIST;
goto out;
}
err = dsl_redaction_list_hold_obj(dp,
bookmark.zbm_redaction_obj, FTAG, &new_rl);
if (err != 0) {
err = EIO;
goto out;
}
dsl_redaction_list_long_hold(dp, new_rl, FTAG);
if (new_rl->rl_phys->rlp_num_snaps != numsnaps) {
err = ESRCH;
goto out;
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
if (!redact_snaps_contains(new_rl->rl_phys->rlp_snaps,
new_rl->rl_phys->rlp_num_snaps,
dsl_dataset_phys(rta->ds)->ds_guid)) {
err = ESRCH;
goto out;
}
}
if (new_rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
new_rl->rl_phys->rlp_last_object == UINT64_MAX) {
err = EEXIST;
goto out;
}
dsl_pool_rele(dp, FTAG);
dp = NULL;
} else {
uint64_t *guids = NULL;
if (numsnaps > 0) {
guids = kmem_zalloc(numsnaps * sizeof (uint64_t),
KM_SLEEP);
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
guids[i] = dsl_dataset_phys(rta->ds)->ds_guid;
}
dsl_pool_rele(dp, FTAG);
dp = NULL;
err = dsl_bookmark_create_redacted(newredactbook, snapname,
numsnaps, guids, FTAG, &new_rl);
kmem_free(guids, numsnaps * sizeof (uint64_t));
if (err != 0) {
goto out;
}
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
(void) bqueue_init(&rta->q, zfs_redact_queue_ff,
zfs_redact_queue_length,
offsetof(struct redact_record, ln));
if (resuming) {
rta->resume.zb_blkid =
new_rl->rl_phys->rlp_last_blkid;
rta->resume.zb_object =
new_rl->rl_phys->rlp_last_object;
}
rta->txg = dsl_dataset_phys(ds)->ds_creation_txg;
(void) thread_create(NULL, 0, redact_traverse_thread, rta,
0, curproc, TS_RUN, minclsyspri);
}
struct redact_merge_thread_arg *rmta;
rmta = kmem_zalloc(sizeof (struct redact_merge_thread_arg), KM_SLEEP);
(void) bqueue_init(&rmta->q, zfs_redact_queue_ff,
zfs_redact_queue_length, offsetof(struct redact_record, ln));
rmta->numsnaps = numsnaps;
rmta->spa = os->os_spa;
rmta->thr_args = args;
(void) thread_create(NULL, 0, redact_merge_thread, rmta, 0, curproc,
TS_RUN, minclsyspri);
err = perform_redaction(os, new_rl, rmta);
bqueue_destroy(&rmta->q);
kmem_free(rmta, sizeof (struct redact_merge_thread_arg));
out:
kmem_free(newredactbook, sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);
if (new_rl != NULL) {
dsl_redaction_list_long_rele(new_rl, FTAG);
dsl_redaction_list_rele(new_rl, FTAG);
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
/*
* rta->ds may be NULL if we got an error while filling
* it in.
*/
if (rta->ds != NULL) {
dsl_dataset_long_rele(rta->ds, FTAG);
dsl_dataset_rele_flags(rta->ds,
DS_HOLD_FLAG_DECRYPT, FTAG);
}
}
if (args != NULL)
kmem_free(args, numsnaps * sizeof (*args));
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
if (ds != NULL) {
dsl_dataset_long_rele(ds, FTAG);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
}
return (SET_ERROR(err));
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index 5e6ced2bb30a..283e2d3b37bb 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -1,3112 +1,3108 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
static int zfs_send_corrupt_data = B_FALSE;
/*
* This tunable controls the amount of data (measured in bytes) that will be
* prefetched by zfs send. If the main thread is blocking on reads that haven't
* completed, this variable might need to be increased. If instead the main
* thread is issuing new reads because the prefetches have fallen out of the
* cache, this may need to be decreased.
*/
static int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
* This tunable controls the length of the queues that zfs send worker threads
* use to communicate. If the send_main_thread is blocking on these queues,
* this variable may need to be increased. If there is a significant slowdown
* at the start of a send as these threads consume all the available IO
* resources, this variable may need to be decreased.
*/
static int zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
* These tunables control the fill fraction of the queues used by zfs send. The fill
* fraction controls the frequency with which threads have to be cv_signaled.
* If a lot of cpu time is being spent on cv_signal, then these should be tuned
* down. If the queues empty before the signalled thread can catch up, then
* these should be tuned up.
*/
static int zfs_send_queue_ff = 20;
static int zfs_send_no_prefetch_queue_ff = 20;
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
static int zfs_override_estimate_recordsize = 0;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
static int zfs_send_unmodified_spill_blocks = B_TRUE;
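/*
 * Multiply a and b and store the result in *c, returning B_FALSE if the
 * product overflows 64 bits. For example, with a = b = (1ULL << 32) the
 * product wraps to 0, temp / b != a, and the function returns B_FALSE;
 * callers such as the HOLE case in do_dump() and send_cb() treat an
 * overflowing offset as lying past the end of the object and simply skip
 * the block.
 */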
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
uint64_t temp = a * b;
if (b != 0 && temp / b != a)
return (B_FALSE);
*c = temp;
return (B_TRUE);
}
struct send_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
uint64_t fromtxg; /* Traverse from this txg */
int flags; /* flags to pass to traverse_dataset */
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
uint64_t *num_blocks_visited;
};
struct redact_list_thread_arg {
boolean_t cancel;
bqueue_t q;
zbookmark_phys_t resume;
redaction_list_t *rl;
boolean_t mark_redact;
int error_code;
uint64_t *num_blocks_visited;
};
struct send_merge_thread_arg {
bqueue_t q;
objset_t *os;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *redact_arg;
int error;
boolean_t cancel;
};
struct send_range {
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t object;
uint64_t start_blkid;
uint64_t end_blkid;
bqueue_node_t ln;
enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
PREVIOUSLY_REDACTED} type;
union {
struct srd {
dmu_object_type_t obj_type;
uint32_t datablksz; // logical size
uint32_t datasz; // payload size
blkptr_t bp;
arc_buf_t *abuf;
abd_t *abd;
kmutex_t lock;
kcondvar_t cv;
boolean_t io_outstanding;
boolean_t io_compressed;
int io_err;
} data;
struct srh {
uint32_t datablksz;
} hole;
struct sro {
/*
* This is a pointer because embedding it in the
* struct causes these structures to be massively larger
* for all range types; this makes the code much less
* memory efficient.
*/
dnode_phys_t *dnp;
blkptr_t bp;
} object;
struct srr {
uint32_t datablksz;
} redact;
struct sror {
blkptr_t bp;
} object_range;
} sru;
};
/*
* The list of data whose inclusion in a send stream can be pending from
* one call to backup_cb to another. Multiple calls to dump_free(),
* dump_freeobjects(), and dump_redact() can be aggregated into a single
* DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
*/
typedef enum {
PENDING_NONE,
PENDING_FREE,
PENDING_FREEOBJECTS,
PENDING_REDACT
} dmu_pendop_t;
typedef struct dmu_send_cookie {
dmu_replay_record_t *dsc_drr;
dmu_send_outparams_t *dsc_dso;
offset_t *dsc_off;
objset_t *dsc_os;
zio_cksum_t dsc_zc;
uint64_t dsc_toguid;
uint64_t dsc_fromtxg;
int dsc_err;
dmu_pendop_t dsc_pending_op;
uint64_t dsc_featureflags;
uint64_t dsc_last_data_object;
uint64_t dsc_last_data_offset;
uint64_t dsc_resume_object;
uint64_t dsc_resume_offset;
boolean_t dsc_sent_begin;
boolean_t dsc_sent_end;
} dmu_send_cookie_t;
static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
static void
range_free(struct send_range *range)
{
if (range->type == OBJECT) {
size_t size = sizeof (dnode_phys_t) *
(range->sru.object.dnp->dn_extra_slots + 1);
kmem_free(range->sru.object.dnp, size);
} else if (range->type == DATA) {
mutex_enter(&range->sru.data.lock);
while (range->sru.data.io_outstanding)
cv_wait(&range->sru.data.cv, &range->sru.data.lock);
if (range->sru.data.abd != NULL)
abd_free(range->sru.data.abd);
if (range->sru.data.abuf != NULL) {
arc_buf_destroy(range->sru.data.abuf,
&range->sru.data.abuf);
}
mutex_exit(&range->sru.data.lock);
cv_destroy(&range->sru.data.cv);
mutex_destroy(&range->sru.data.lock);
}
kmem_free(range, sizeof (*range));
}
/*
* For all record types except BEGIN, fill in the checksum (overlaid in
* drr_u.drr_checksum.drr_checksum). The checksum verifies everything
* up to the start of the checksum itself.
*/
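/*
 * Each call emits one fixed-size dmu_replay_record_t header followed by an
 * optional payload of payload_len bytes (only record types that carry data,
 * e.g. DRR_WRITE, DRR_SPILL and DRR_OBJECT, pass a payload here). The
 * running fletcher-4 checksum is folded into drr_u.drr_checksum.drr_checksum
 * for every record type except DRR_BEGIN, and *dsc_off advances by
 * sizeof (dmu_replay_record_t) plus payload_len even when dso_dryrun
 * suppresses the payload pointer.
 */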
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
dmu_send_outparams_t *dso = dscp->dsc_dso;
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
(void) fletcher_4_incremental_native(dscp->dsc_drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&dscp->dsc_zc);
if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
dscp->dsc_sent_begin = B_TRUE;
} else {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
drr_checksum.drr_checksum));
dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
}
if (dscp->dsc_drr->drr_type == DRR_END) {
dscp->dsc_sent_end = B_TRUE;
}
(void) fletcher_4_incremental_native(&dscp->dsc_drr->
drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), &dscp->dsc_zc);
*dscp->dsc_off += sizeof (dmu_replay_record_t);
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
sizeof (dmu_replay_record_t), dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
if (payload_len != 0) {
*dscp->dsc_off += payload_len;
/*
* payload is null when dso_dryrun == B_TRUE (i.e. when we're
* doing a send size calculation)
*/
if (payload != NULL) {
(void) fletcher_4_incremental_native(
payload, payload_len, &dscp->dsc_zc);
}
/*
* The code does not rely on this (len being a multiple of 8).
* We keep this assertion because of the corresponding assertion
* in receive_read(). Keeping this assertion ensures that we do
* not inadvertently break backwards compatibility (causing the
* assertion in receive_read() to trigger on old software).
*
* Raw sends cannot be received on old software, and so can
* bypass this assertion.
*/
ASSERT((payload_len % 8 == 0) ||
(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
payload_len, dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
}
return (0);
}
/*
* Fill in the drr_free struct, or perform aggregation if the previous record is
* also a free record, and the two are adjacent.
*
* Note that we send free records even for a full send, because we want to be
* able to receive a full send as a clone, which requires a list of all the free
* and freeobject records that were generated on the source.
*/
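/*
 * A worked example of the aggregation (illustrative values):
 * dump_free(dscp, 5, 0, 131072) leaves a pending DRR_FREE record for
 * object 5, offset 0, length 131072. A following
 * dump_free(dscp, 5, 131072, 131072) is adjacent, so it only extends
 * drr_length to 262144 without writing anything. The pending record is
 * pushed out by dump_record() when a non-adjacent free, a record of a
 * different type, or the end of the stream is reached.
 */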
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
/*
* When we receive a free record, dbuf_free_range() assumes
* that the receiving system doesn't have any dbufs in the range
* being freed. This is always true because there is a one-record
* constraint: we only send one WRITE record for any given
* object,offset. We know that the one-record constraint is
* true because we always send data in increasing order by
* object,offset.
*
* If the increasing-order constraint ever changes, we should find
* another way to assert that the one-record constraint is still
* satisfied.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
* aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREE) {
/*
* Check to see whether this free block can be aggregated
* with the pending one.
*/
if (drrf->drr_object == object && drrf->drr_offset +
drrf->drr_length == offset) {
if (offset + length < offset || length == UINT64_MAX)
drrf->drr_length = UINT64_MAX;
else
drrf->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
if (offset + length < offset)
drrf->drr_length = DMU_OBJECT_END;
else
drrf->drr_length = length;
drrf->drr_toguid = dscp->dsc_toguid;
if (length == DMU_OBJECT_END) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
} else {
dscp->dsc_pending_op = PENDING_FREE;
}
return (0);
}
/*
* Fill in the drr_redact struct, or perform aggregation if the previous record
* is also a redaction record, and the two are adjacent.
*/
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
/*
* If there is a pending op, but it's not PENDING_REDACT, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_REDACT records can only be aggregated with
* other DRR_REDACT records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_REDACT) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_REDACT) {
/*
* Check to see whether this redacted block can be aggregated
* with the pending one.
*/
if (drrr->drr_object == object && drrr->drr_offset +
drrr->drr_length == offset) {
drrr->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a REDACT record and make it pending */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
drrr->drr_length = length;
drrr->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_REDACT;
return (0);
}
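/*
 * Emit a DRR_WRITE record for a single block. As an illustrative case
 * (assuming DMU_BACKUP_FEATURE_COMPRESSED was negotiated), a 128K logical
 * block that lz4-compressed to 32K and was read compressed from the ARC is
 * sent with drr_logical_size == 131072, drr_compressed_size == 32768 and a
 * 32768-byte payload; without the compressed-send feature the same block is
 * sent as a plain 131072-byte payload with no compression fields set.
 */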
static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
uint64_t offset, int lsize, int psize, const blkptr_t *bp,
boolean_t io_compressed, void *data)
{
uint64_t payload_size;
boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
/*
* We send data in increasing object, offset order.
* See comment in dump_free() for details.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
dscp->dsc_last_data_object = object;
dscp->dsc_last_data_offset = offset + lsize - 1;
/*
* If there is any kind of pending aggregation (currently either
* a grouping of free objects or free blocks), push it out to
* the stream, since aggregation can't be done across operations
* of different types.
*/
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_logical_size = lsize;
/* only set the compression fields if the buf is compressed or raw */
boolean_t compressed =
(bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
io_compressed : lsize != psize);
if (raw || compressed) {
- ASSERT(bp != NULL);
ASSERT(raw || dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3S(psize, >, 0);
if (raw) {
ASSERT(BP_IS_PROTECTED(bp));
/*
* This is a raw protected block so we need to pass
* along everything the receiving side will need to
* interpret this block, including the byteswap, salt,
* IV, and MAC.
*/
if (BP_SHOULD_BYTESWAP(bp))
drrw->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drrw->drr_salt,
drrw->drr_iv);
zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
} else {
/* this is a compressed block */
ASSERT(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_SHOULD_BYTESWAP(bp));
ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
ASSERT3S(lsize, >=, psize);
}
/* set fields common to compressed and raw sends */
drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
drrw->drr_compressed_size = psize;
payload_size = drrw->drr_compressed_size;
} else {
payload_size = drrw->drr_logical_size;
}
if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
/*
* There's no pre-computed checksum for partial-block writes,
* embedded BP's, or encrypted BP's that are being sent as
* plaintext, so (like fletcher4-checksummed blocks) userland
* will have to compute a dedup-capable checksum itself.
*/
drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
} else {
drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
ZCHECKSUM_FLAG_DEDUP)
drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
drrw->drr_key.ddk_cksum = bp->blk_cksum;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
int blksz, const blkptr_t *bp)
{
char buf[BPE_PAYLOAD_SIZE];
struct drr_write_embedded *drrw =
&(dscp->dsc_drr->drr_u.drr_write_embedded);
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
ASSERT(BP_IS_EMBEDDED(bp));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_compression = BP_GET_COMPRESS(bp);
drrw->drr_etype = BPE_GET_ETYPE(bp);
drrw->drr_lsize = BPE_GET_LSIZE(bp);
drrw->drr_psize = BPE_GET_PSIZE(bp);
decode_embedded_bp_compressed(bp, buf);
if (dump_record(dscp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
void *data)
{
struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
uint64_t blksz = BP_GET_LSIZE(bp);
uint64_t payload_size = blksz;
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a SPILL record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
drrs->drr_toguid = dscp->dsc_toguid;
/* See comment in dump_dnode() for full details */
if (zfs_send_unmodified_spill_blocks &&
(bp->blk_birth <= dscp->dsc_fromtxg)) {
drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
}
/* handle raw send fields */
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drrs->drr_flags |= DRR_RAW_BYTESWAP;
drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
drrs->drr_compressed_size = BP_GET_PSIZE(bp);
zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
payload_size = drrs->drr_compressed_size;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
uint64_t maxobj = DNODES_PER_BLOCK *
(DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
/*
* ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
* leading to zfs recv never completing. To avoid this issue, don't
* send FREEOBJECTS records for object IDs which cannot exist on the
* receiving side.
*/
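/*
 * For example (illustrative numbers): with DNODES_PER_BLOCK == 32 and a
 * meta-dnode dn_maxblkid of 31, maxobj == 1024. A request to free
 * objects [2000, 2100) is then dropped entirely, while a request for
 * [1000, 1100) is clamped to the 24 objects that can actually exist.
 */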
if (maxobj > 0) {
if (maxobj <= firstobj)
return (0);
if (maxobj < firstobj + numobjs)
numobjs = maxobj - firstobj;
}
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
* can only be aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with the pending one
*/
if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
drrfo->drr_numobjs += numobjs;
return (0);
} else {
/* can't be aggregated. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
drrfo->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_FREEOBJECTS;
return (0);
}
static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
int bonuslen;
if (object < dscp->dsc_resume_object) {
/*
* Note: when resuming, we will visit all the dnodes in
* the block of dnodes that we are resuming from. In
* this case it's unnecessary to send the dnodes prior to
* the one we are resuming from. We should be at most one
* block's worth of dnodes behind the resume point.
*/
ASSERT3U(dscp->dsc_resume_object - object, <,
1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
return (0);
}
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
return (dump_freeobjects(dscp, object, 1));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
drro->drr_bonuslen = dnp->dn_bonuslen;
drro->drr_dn_slots = dnp->dn_extra_slots + 1;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
drro->drr_toguid = dscp->dsc_toguid;
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drro->drr_flags |= DRR_RAW_BYTESWAP;
/* needed for reconstructing dnp on recv side */
drro->drr_maxblkid = dnp->dn_maxblkid;
drro->drr_indblkshift = dnp->dn_indblkshift;
drro->drr_nlevels = dnp->dn_nlevels;
drro->drr_nblkptr = dnp->dn_nblkptr;
/*
* Since we encrypt the entire bonus area, the (raw) part
* beyond the bonuslen is actually nonzero, so we need
* to send it.
*/
if (bonuslen != 0) {
if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
return (SET_ERROR(EINVAL));
drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
bonuslen = drro->drr_raw_bonuslen;
}
}
/*
* DRR_OBJECT_SPILL is set for every dnode which references a
* spill block. This allows the receiving pool to definitively
* determine when a spill block should be kept or freed.
*/
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
drro->drr_flags |= DRR_OBJECT_SPILL;
if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
return (SET_ERROR(EINTR));
/* Free anything past the end of the file. */
if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
return (SET_ERROR(EINTR));
/*
* Send DRR_SPILL records for unmodified spill blocks. This is useful
* because changing certain attributes of the object (e.g. blocksize)
* can cause old versions of ZFS to incorrectly remove a spill block.
* Including these records in the stream forces an up-to-date version
* to always be written, ensuring they're never lost. Current versions
* of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
* ignore these unmodified spill blocks.
*/
if (zfs_send_unmodified_spill_blocks &&
(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
(DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
memset(&record, 0, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
record.start_blkid = DMU_SPILL_BLKID;
record.end_blkid = record.start_blkid + 1;
record.sru.data.bp = *bp;
record.sru.data.obj_type = dnp->dn_type;
record.sru.data.datablksz = BP_GET_LSIZE(bp);
if (do_dump(dscp, &record) != 0)
return (SET_ERROR(EINTR));
}
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
uint64_t firstobj, uint64_t numslots)
{
struct drr_object_range *drror =
&(dscp->dsc_drr->drr_u.drr_object_range);
/* we only use this record type for raw sends */
ASSERT(BP_IS_PROTECTED(bp));
ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
ASSERT0(BP_GET_LEVEL(bp));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
drror->drr_toguid = dscp->dsc_toguid;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
zio_crypt_decode_mac_bp(bp, drror->drr_mac);
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
return (0);
}
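/*
 * Decide whether a block pointer can be sent as a DRR_WRITE_EMBEDDED
 * record. For instance, a zstd-compressed embedded block is only sent in
 * embedded form when DMU_BACKUP_FEATURE_ZSTD was negotiated, and even then
 * only for BP_EMBEDDED_TYPE_DATA payloads with DMU_BACKUP_FEATURE_EMBED_DATA
 * set; anything else falls back to an ordinary write record in do_dump().
 */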
static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
if (!BP_IS_EMBEDDED(bp))
return (B_FALSE);
/*
* Compression function must be legacy, or explicitly enabled.
*/
if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
!(featureflags & DMU_BACKUP_FEATURE_LZ4)))
return (B_FALSE);
/*
* If we have not set the ZSTD feature flag, we can't send ZSTD
* compressed embedded blocks, as the receiver may not support them.
*/
if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
!(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
return (B_FALSE);
/*
* Embed type must be explicitly enabled.
*/
switch (BPE_GET_ETYPE(bp)) {
case BP_EMBEDDED_TYPE_DATA:
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (B_TRUE);
break;
default:
return (B_FALSE);
}
return (B_FALSE);
}
/*
* This function actually handles figuring out what kind of record needs to be
* dumped, and calling the appropriate helper function. In most cases,
* the data has already been read by send_reader_thread().
*/
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
int err = 0;
switch (range->type) {
case OBJECT:
err = dump_dnode(dscp, &range->sru.object.bp, range->object,
range->sru.object.dnp);
return (err);
case OBJECT_RANGE: {
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
return (0);
}
uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
DNODE_SHIFT;
uint64_t firstobj = range->start_blkid * epb;
err = dump_object_range(dscp, &range->sru.object_range.bp,
firstobj, epb);
break;
}
case REDACT: {
struct srr *srrp = &range->sru.redact;
err = dump_redact(dscp, range->object, range->start_blkid *
srrp->datablksz, (range->end_blkid - range->start_blkid) *
srrp->datablksz);
return (err);
}
case DATA: {
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
spa_t *spa =
dmu_objset_spa(dscp->dsc_os);
ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (BP_GET_TYPE(bp) == DMU_OT_SA) {
arc_flags_t aflags = ARC_FLAG_WAIT;
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
zioflags |= ZIO_FLAG_RAW;
}
zbookmark_phys_t zb;
ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
zb.zb_objset = dmu_objset_id(dscp->dsc_os);
zb.zb_object = range->object;
zb.zb_level = 0;
zb.zb_blkid = range->start_blkid;
arc_buf_t *abuf = NULL;
if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb) != 0)
return (SET_ERROR(EIO));
err = dump_spill(dscp, bp, zb.zb_object,
(abuf == NULL ? NULL : abuf->b_data));
if (abuf != NULL)
arc_buf_destroy(abuf, &abuf);
return (err);
}
if (send_do_embed(bp, dscp->dsc_featureflags)) {
err = dump_write_embedded(dscp, range->object,
range->start_blkid * srdp->datablksz,
srdp->datablksz, bp);
return (err);
}
ASSERT(range->object > dscp->dsc_resume_object ||
(range->object == dscp->dsc_resume_object &&
range->start_blkid * srdp->datablksz >=
dscp->dsc_resume_offset));
/* it's a level-0 block of a regular object */
mutex_enter(&srdp->lock);
while (srdp->io_outstanding)
cv_wait(&srdp->cv, &srdp->lock);
err = srdp->io_err;
mutex_exit(&srdp->lock);
if (err != 0) {
if (zfs_send_corrupt_data &&
!dscp->dsc_dso->dso_dryrun) {
/*
* Send a block filled with 0x"zfs badd bloc"
*/
srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
ARC_BUFC_DATA, srdp->datablksz);
uint64_t *ptr;
for (ptr = srdp->abuf->b_data;
(char *)ptr < (char *)srdp->abuf->b_data +
srdp->datablksz; ptr++)
*ptr = 0x2f5baddb10cULL;
} else {
return (SET_ERROR(EIO));
}
}
ASSERT(dscp->dsc_dso->dso_dryrun ||
srdp->abuf != NULL || srdp->abd != NULL);
uint64_t offset = range->start_blkid * srdp->datablksz;
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
ASSERT3P(srdp->abuf, ==, NULL);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
/*
* If we have large blocks stored on disk but the send flags
* don't allow us to send large blocks, we split the data from
* the arc buf into chunks.
*/
if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
- if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)
- return (SET_ERROR(ENOTSUP));
-
while (srdp->datablksz > 0 && err == 0) {
int n = MIN(srdp->datablksz,
SPA_OLD_MAXBLOCKSIZE);
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset, n, n, NULL, B_FALSE,
data);
offset += n;
/*
* When doing dry run, data==NULL is used as a
* sentinel value by
* dmu_dump_write()->dump_record().
*/
if (data != NULL)
data += n;
srdp->datablksz -= n;
}
} else {
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset,
srdp->datablksz, srdp->datasz, bp,
srdp->io_compressed, data);
}
return (err);
}
case HOLE: {
struct srh *srhp = &range->sru.hole;
if (range->object == DMU_META_DNODE_OBJECT) {
uint32_t span = srhp->datablksz >> DNODE_SHIFT;
uint64_t first_obj = range->start_blkid * span;
uint64_t numobj = range->end_blkid * span - first_obj;
return (dump_freeobjects(dscp, first_obj, numobj));
}
uint64_t offset = 0;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it must be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(range->start_blkid, srhp->datablksz,
&offset)) {
return (0);
}
uint64_t len = 0;
if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
len = UINT64_MAX;
len = len - offset;
return (dump_free(dscp, range->object, offset, len));
}
default:
panic("Invalid range type in do_dump: %d", range->type);
}
return (err);
}
static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
uint64_t end_blkid, boolean_t eos)
{
struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
range->type = type;
range->object = object;
range->start_blkid = start_blkid;
range->end_blkid = end_blkid;
range->eos_marker = eos;
if (type == DATA) {
range->sru.data.abd = NULL;
range->sru.data.abuf = NULL;
mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
range->sru.data.io_outstanding = 0;
range->sru.data.io_err = 0;
range->sru.data.io_compressed = B_FALSE;
}
return (range);
}
/*
* This is the callback function to traverse_dataset that acts as a worker
* thread for dmu_send_impl.
*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
(void) zilog;
struct send_thread_arg *sta = arg;
struct send_range *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= sta->resume.zb_object);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (sta->os->os_encrypted &&
!BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
spa_log_error(spa, zb);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(sta->os));
return (SET_ERROR(EIO));
}
if (sta->cancel)
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object))
return (0);
atomic_inc_64(sta->num_blocks_visited);
if (zb->zb_level == ZB_DNODE_LEVEL) {
if (zb->zb_object == DMU_META_DNODE_OBJECT)
return (0);
record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
memcpy(record->sru.object.dnp, dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
!BP_IS_HOLE(bp)) {
record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
zb->zb_blkid + 1, B_FALSE);
record->sru.object_range.bp = *bp;
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
return (0);
if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
return (0);
uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
uint64_t start;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it must be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
span * zb->zb_blkid > dnp->dn_maxblkid)) {
ASSERT(BP_IS_HOLE(bp));
return (0);
}
if (zb->zb_blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
enum type record_type = DATA;
if (BP_IS_HOLE(bp))
record_type = HOLE;
else if (BP_IS_REDACTED(bp))
record_type = REDACT;
else
record_type = DATA;
record = range_alloc(record_type, zb->zb_object, start,
(start + span < start ? 0 : start + span), B_FALSE);
uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
if (BP_IS_HOLE(bp)) {
record->sru.hole.datablksz = datablksz;
} else if (BP_IS_REDACTED(bp)) {
record->sru.redact.datablksz = datablksz;
} else {
record->sru.data.datablksz = datablksz;
record->sru.data.obj_type = dnp->dn_type;
record->sru.data.bp = *bp;
}
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
struct redact_list_cb_arg {
uint64_t *num_blocks_visited;
bqueue_t *q;
boolean_t *cancel;
boolean_t mark_redact;
};
static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
struct redact_list_cb_arg *rlcap = arg;
atomic_inc_64(rlcap->num_blocks_visited);
if (*rlcap->cancel)
return (-1);
struct send_range *data = range_alloc(REDACT, rb->rbp_object,
rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
if (rlcap->mark_redact) {
data->type = REDACT;
data->sru.redact.datablksz = redact_block_get_size(rb);
} else {
data->type = PREVIOUSLY_REDACTED;
}
bqueue_enqueue(rlcap->q, data, sizeof (*data));
return (0);
}
/*
* This function kicks off the traverse_dataset. It also handles setting the
* error code of the thread in case something goes wrong, and pushes the End of
* Stream record when the traverse_dataset call has finished.
*/
static __attribute__((noreturn)) void
send_traverse_thread(void *arg)
{
struct send_thread_arg *st_arg = arg;
int err = 0;
struct send_range *data;
fstrans_cookie_t cookie = spl_fstrans_mark();
err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
st_arg->fromtxg, &st_arg->resume,
st_arg->flags, send_cb, st_arg);
if (err != EINTR)
st_arg->error_code = err;
data = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Utility function that causes End of Stream records to compare after all
* others, so that other threads' comparison logic can stay simple.
*/
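/*
 * Ranges with object == 0 describe meta-dnode blocks and are compared in
 * object-ID space: for example, a meta-dnode range covering blkids [2, 4)
 * is treated as if it covered object IDs
 * [2 << DNODES_PER_BLOCK_SHIFT, 4 << DNODES_PER_BLOCK_SHIFT).
 */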
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
if (from->eos_marker == B_TRUE)
return (1);
if (to->eos_marker == B_TRUE)
return (-1);
uint64_t from_obj = from->object;
uint64_t from_end_obj = from->object + 1;
uint64_t to_obj = to->object;
uint64_t to_end_obj = to->object + 1;
if (from_obj == 0) {
ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (to_obj == 0) {
ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (from_end_obj <= to_obj)
return (-1);
if (from_obj >= to_end_obj)
return (1);
int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
if (unlikely(cmp))
return (cmp);
if (from->end_blkid <= to->start_blkid)
return (-1);
if (from->start_blkid >= to->end_blkid)
return (1);
return (0);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, but do not free the old data. This is used so that the
* records can be sent on to the main thread without copying the data.
*/
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = bqueue_dequeue(bq);
ASSERT3S(send_range_after(prev, next), ==, -1);
return (next);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, and free the old data.
*/
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = get_next_range_nofree(bq, prev);
range_free(prev);
return (next);
}
static __attribute__((noreturn)) void
redact_list_thread(void *arg)
{
struct redact_list_thread_arg *rlt_arg = arg;
struct send_range *record;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (rlt_arg->rl != NULL) {
struct redact_list_cb_arg rlcba = {0};
rlcba.cancel = &rlt_arg->cancel;
rlcba.q = &rlt_arg->q;
rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
rlcba.mark_redact = rlt_arg->mark_redact;
int err = dsl_redaction_list_traverse(rlt_arg->rl,
&rlt_arg->resume, redact_list_cb, &rlcba);
if (err != EINTR)
rlt_arg->error_code = err;
}
record = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Compare the start point of the two provided ranges. End of stream ranges
* compare last, objects compare before any data or hole inside that object, and
* before multi-object holes that start at the same object.
*/
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
uint64_t r1_objequiv = r1->object;
uint64_t r1_l0equiv = r1->start_blkid;
uint64_t r2_objequiv = r2->object;
uint64_t r2_l0equiv = r2->start_blkid;
int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
if (unlikely(cmp))
return (cmp);
if (r1->object == 0) {
r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
r1_l0equiv = 0;
}
if (r2->object == 0) {
r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
r2_l0equiv = 0;
}
cmp = TREE_CMP(r1_objequiv, r2_objequiv);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
if (unlikely(cmp))
return (cmp);
return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}
enum q_idx {
REDACT_IDX = 0,
TO_IDX,
FROM_IDX,
NUM_THREADS
};
/*
* This function returns the next range the send_merge_thread should operate on.
* The inputs are two arrays; the first one stores the range at the front of the
* queues stored in the second one. The ranges are sorted in descending
* priority order; the metadata from earlier ranges overrules metadata from
* later ranges. out_mask is used to return which threads the ranges came from;
* bit i is set if ranges[i] started at the same place as the returned range.
*
* This code is not hardcoded to compare a specific number of threads; it could
* be used with any number, just by changing the q_idx enum.
*
* The "next range" is the one with the earliest start; if two starts are equal,
* the highest-priority range is the next to operate on. If a higher-priority
* range starts in the middle of the first range, then the first range will be
* truncated to end where the higher-priority range starts, and we will operate
* on that one next time. In this way, we make sure that each block covered by
* some range gets covered by a returned range, and each block covered is
* returned using the metadata of the highest-priority range it appears in.
*
* For example, if the three ranges at the front of the queues were [2,4),
* [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
* from the third range, [2,4) with the metadata from the first range, and then
* [4,5) with the metadata from the second.
*/
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
int idx = 0; // index of the range with the earliest start
int i;
uint64_t bmask = 0;
for (i = 1; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
idx = i;
}
if (ranges[idx]->eos_marker) {
struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
*out_mask = 0;
return (ret);
}
/*
* Find all the ranges that start at that same point.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
bmask |= 1 << i;
}
*out_mask = bmask;
/*
* OBJECT_RANGE records only come from the TO thread, and should always
* be treated as overlapping with nothing and sent on immediately. They
* are only used in raw sends, and are never redacted.
*/
if (ranges[idx]->type == OBJECT_RANGE) {
ASSERT3U(idx, ==, TO_IDX);
ASSERT3U(*out_mask, ==, 1 << TO_IDX);
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Find the first start or end point after the start of the first range.
*/
uint64_t first_change = ranges[idx]->end_blkid;
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || ranges[i]->eos_marker ||
ranges[i]->object > ranges[idx]->object ||
ranges[i]->object == DMU_META_DNODE_OBJECT)
continue;
ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
if (first_change > ranges[i]->start_blkid &&
(bmask & (1 << i)) == 0)
first_change = ranges[i]->start_blkid;
else if (first_change > ranges[i]->end_blkid)
first_change = ranges[i]->end_blkid;
}
/*
* Update all ranges to no longer overlap with the range we're
* returning. All such ranges must start at the same place as the range
* being returned, and end at or after first_change. Thus we update
* their start to first_change. If that makes them size 0, then free
* them and pull a new range from that thread.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || (bmask & (1 << i)) == 0)
continue;
ASSERT3U(first_change, >, ranges[i]->start_blkid);
ranges[i]->start_blkid = first_change;
ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
if (ranges[i]->start_blkid == ranges[i]->end_blkid)
ranges[i] = get_next_range(qs[i], ranges[i]);
}
/*
* Short-circuit the simple case; if the range doesn't overlap with
* anything else, or it only overlaps with things that start at the same
* place and are longer, send it on.
*/
if (first_change == ranges[idx]->end_blkid) {
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Otherwise, return a truncated copy of ranges[idx] and move the start
* of ranges[idx] back to first_change.
*/
struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
*ret = *ranges[idx];
ret->end_blkid = first_change;
ranges[idx]->start_blkid = first_change;
return (ret);
}
#define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
/*
* Merge the results from the from thread and the to thread, and then hand the
* records off to send_prefetch_thread to prefetch them. If this is not a
* send from a redaction bookmark, the from thread will push an end of stream
* record and stop, and we'll just send everything that was changed in the
* to_ds since the ancestor's creation txg. If it is, then since
* traverse_dataset has a canonical order, we can compare each change as
* they're pulled off the queues. That will give us a stream that is
* appropriately sorted, and covers all records. In addition, we pull the
* data from the redact_list_thread and use that to determine which blocks
* should be redacted.
*/
static __attribute__((noreturn)) void
send_merge_thread(void *arg)
{
struct send_merge_thread_arg *smt_arg = arg;
struct send_range *front_ranges[NUM_THREADS];
bqueue_t *queues[NUM_THREADS];
int err = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (smt_arg->redact_arg == NULL) {
front_ranges[REDACT_IDX] =
kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
front_ranges[REDACT_IDX]->type = REDACT;
queues[REDACT_IDX] = NULL;
} else {
front_ranges[REDACT_IDX] =
bqueue_dequeue(&smt_arg->redact_arg->q);
queues[REDACT_IDX] = &smt_arg->redact_arg->q;
}
front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
queues[TO_IDX] = &smt_arg->to_arg->q;
front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
queues[FROM_IDX] = &smt_arg->from_arg->q;
uint64_t mask = 0;
struct send_range *range;
for (range = find_next_range(front_ranges, queues, &mask);
!range->eos_marker && err == 0 && !smt_arg->cancel;
range = find_next_range(front_ranges, queues, &mask)) {
/*
* If the range in question was in both the from redact bookmark
* and the bookmark we're using to redact, then don't send it.
* It's already redacted on the receiving system, so a redaction
* record would be redundant.
*/
if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
ASSERT3U(range->type, ==, REDACT);
range_free(range);
continue;
}
bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
if (smt_arg->to_arg->error_code != 0) {
err = smt_arg->to_arg->error_code;
} else if (smt_arg->from_arg->error_code != 0) {
err = smt_arg->from_arg->error_code;
} else if (smt_arg->redact_arg != NULL &&
smt_arg->redact_arg->error_code != 0) {
err = smt_arg->redact_arg->error_code;
}
}
if (smt_arg->cancel && err == 0)
err = SET_ERROR(EINTR);
smt_arg->error = err;
if (smt_arg->error != 0) {
smt_arg->to_arg->cancel = B_TRUE;
smt_arg->from_arg->cancel = B_TRUE;
if (smt_arg->redact_arg != NULL)
smt_arg->redact_arg->cancel = B_TRUE;
}
for (int i = 0; i < NUM_THREADS; i++) {
while (!front_ranges[i]->eos_marker) {
front_ranges[i] = get_next_range(queues[i],
front_ranges[i]);
}
range_free(front_ranges[i]);
}
if (range == NULL)
range = kmem_zalloc(sizeof (*range), KM_SLEEP);
range->eos_marker = B_TRUE;
bqueue_enqueue_flush(&smt_arg->q, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
struct send_reader_thread_arg {
struct send_merge_thread_arg *smta;
bqueue_t q;
boolean_t cancel;
boolean_t issue_reads;
uint64_t featureflags;
int error;
};
static void
dmu_send_read_done(zio_t *zio)
{
struct send_range *range = zio->io_private;
mutex_enter(&range->sru.data.lock);
if (zio->io_error != 0) {
abd_free(range->sru.data.abd);
range->sru.data.abd = NULL;
range->sru.data.io_err = zio->io_error;
}
ASSERT(range->sru.data.io_outstanding);
range->sru.data.io_outstanding = B_FALSE;
cv_broadcast(&range->sru.data.cv);
mutex_exit(&range->sru.data.lock);
}
static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
objset_t *os = srta->smta->os;
ASSERT3U(range->type, ==, DATA);
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
/*
* If we have large blocks stored on disk but
* the send flags don't allow us to send large
* blocks, we split the data from the arc buf
* into chunks.
*/
boolean_t split_large_blocks =
srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
/*
* We should only request compressed data from the ARC if all
* the following are true:
* - stream compression was requested
* - we aren't splitting large blocks into smaller chunks
* - the data won't need to be byteswapped before sending
* - this isn't an embedded block
* - this isn't metadata (if receiving on a different endian
* system it can be byteswapped more easily)
*/
boolean_t request_compressed =
(srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
!split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
!BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
zioflags |= ZIO_FLAG_RAW;
srdp->io_compressed = B_TRUE;
} else if (request_compressed) {
zioflags |= ZIO_FLAG_RAW_COMPRESS;
srdp->io_compressed = B_TRUE;
}
srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
if (!srta->issue_reads)
return;
if (BP_IS_REDACTED(bp))
return;
if (send_do_embed(bp, srta->featureflags))
return;
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(os),
.zb_object = range->object,
.zb_level = 0,
.zb_blkid = range->start_blkid,
};
arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
int arc_err = arc_read(NULL, os->os_spa, bp,
arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb);
/*
* If the data is not already cached in the ARC, we read directly
* from zio. This avoids the performance overhead of adding a new
* entry to the ARC, and we also avoid polluting the ARC cache with
* data that is not likely to be used in the future.
*/
if (arc_err != 0) {
srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
srdp->io_outstanding = B_TRUE;
zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
srdp->datasz, dmu_send_read_done, range,
ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
}
}
/*
* Create a new record with the given values.
*/
static void
enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
(BP_IS_REDACTED(bp) ? REDACT : DATA));
struct send_range *range = range_alloc(range_type, dn->dn_object,
blkid, blkid + count, B_FALSE);
if (blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
switch (range_type) {
case HOLE:
range->sru.hole.datablksz = datablksz;
break;
case DATA:
ASSERT3U(count, ==, 1);
range->sru.data.datablksz = datablksz;
range->sru.data.obj_type = dn->dn_type;
range->sru.data.bp = *bp;
issue_data_read(srta, range);
break;
case REDACT:
range->sru.redact.datablksz = datablksz;
break;
default:
break;
}
bqueue_enqueue(q, range, datablksz);
}
/*
* This thread is responsible for two things: First, it retrieves the correct
* blkptr in the to ds if we need to send the data because of something from
* the from thread. As a result of this, we're the first ones to discover that
* some indirect blocks can be discarded because they're not holes. Second,
* it issues prefetches for the data we need to send.
*/
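/*
 * As an example of the PREVIOUSLY_REDACTED handling below: if blkid 10 of
 * an object is being examined and dnode_next_offset() reports that the next
 * non-hole data starts at the offset of blkid 50, a single HOLE range
 * covering blkids [10, 50) is enqueued and the scan resumes at blkid 50,
 * rather than looking up a block pointer for every blkid in between.
 */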
static __attribute__((noreturn)) void
send_reader_thread(void *arg)
{
struct send_reader_thread_arg *srta = arg;
struct send_merge_thread_arg *smta = srta->smta;
bqueue_t *inq = &smta->q;
bqueue_t *outq = &srta->q;
objset_t *os = smta->os;
fstrans_cookie_t cookie = spl_fstrans_mark();
struct send_range *range = bqueue_dequeue(inq);
int err = 0;
/*
* If the record we're analyzing is from a redaction bookmark from the
* fromds, then we need to know whether or not it exists in the tods so
* we know whether to create records for it or not. If it does, we need
* the datablksz so we can generate an appropriate record for it.
* Finally, if it isn't redacted, we need the blkptr so that we can send
* a WRITE record containing the actual data.
*/
uint64_t last_obj = UINT64_MAX;
uint64_t last_obj_exists = B_TRUE;
while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
err == 0) {
switch (range->type) {
case DATA:
issue_data_read(srta, range);
bqueue_enqueue(outq, range, range->sru.data.datablksz);
range = get_next_range_nofree(inq, range);
break;
case HOLE:
case OBJECT:
case OBJECT_RANGE:
case REDACT: // Redacted blocks must exist
bqueue_enqueue(outq, range, sizeof (*range));
range = get_next_range_nofree(inq, range);
break;
case PREVIOUSLY_REDACTED: {
/*
* This entry came from the "from bookmark" when
* sending from a bookmark that has a redaction
* list. We need to check if this object/blkid
* exists in the target ("to") dataset, and if
* not then we drop this entry. We also need
* to fill in the block pointer so that we know
* what to prefetch.
*
* To accomplish the above, we first cache whether or
* not the last object we examined exists. If it
* doesn't, we can drop this record. If it does, we hold
* the dnode and use it to call dbuf_dnode_findbp. We do
* this instead of dbuf_bookmark_findbp because we will
* often operate on large ranges, and holding the dnode
* once is more efficient.
*/
boolean_t object_exists = B_TRUE;
/*
* If the data is redacted, we only care if it exists,
* so that we don't send records for objects that have
* been deleted.
*/
dnode_t *dn;
if (range->object == last_obj && !last_obj_exists) {
/*
* If we're still examining the same object as
* previously, and it doesn't exist, we don't
* need to call dbuf_bookmark_findbp.
*/
object_exists = B_FALSE;
} else {
err = dnode_hold(os, range->object, FTAG, &dn);
if (err == ENOENT) {
object_exists = B_FALSE;
err = 0;
}
last_obj = range->object;
last_obj_exists = object_exists;
}
if (err != 0) {
break;
} else if (!object_exists) {
/*
* The block was modified, but doesn't
* exist in the to dataset; if it was
* deleted in the to dataset, then we'll
* visit the hole bp for it at some point.
*/
range = get_next_range(inq, range);
continue;
}
uint64_t file_max =
(dn->dn_maxblkid < range->end_blkid ?
dn->dn_maxblkid : range->end_blkid);
/*
* The object exists, so we need to try to find the
* blkptr for each block in the range we're processing.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
for (uint64_t blkid = range->start_blkid;
blkid < file_max; blkid++) {
blkptr_t bp;
uint32_t datablksz =
dn->dn_phys->dn_datablkszsec <<
SPA_MINBLOCKSHIFT;
uint64_t offset = blkid * datablksz;
/*
* This call finds the next non-hole block in
* the object. This is to prevent a
* performance problem where we're unredacting
* a large hole. Using dnode_next_offset to
* skip over the large hole avoids iterating
* over every block in it.
*/
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&offset, 1, 1, 0);
if (err == ESRCH) {
offset = UINT64_MAX;
err = 0;
} else if (err != 0) {
break;
}
if (offset != blkid * datablksz) {
/*
* if there is a hole from here
* (blkid) to offset
*/
offset = MIN(offset, file_max *
datablksz);
uint64_t nblks = (offset / datablksz) -
blkid;
enqueue_range(srta, outq, dn, blkid,
nblks, NULL, datablksz);
blkid += nblks;
}
if (blkid >= file_max)
break;
err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
NULL, NULL);
if (err != 0)
break;
ASSERT(!BP_IS_HOLE(&bp));
enqueue_range(srta, outq, dn, blkid, 1, &bp,
datablksz);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
range = get_next_range(inq, range);
}
}
}
if (srta->cancel || err != 0) {
smta->cancel = B_TRUE;
srta->error = err;
} else if (smta->error != 0) {
srta->error = smta->error;
}
while (!range->eos_marker)
range = get_next_range(inq, range);
bqueue_enqueue_flush(outq, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
#define NUM_SNAPS_NOT_REDACTED UINT64_MAX
struct dmu_send_params {
/* Pool args */
const void *tag; // Tag dp was held with, will be used to release dp.
dsl_pool_t *dp;
/* To snapshot args */
const char *tosnap;
dsl_dataset_t *to_ds;
/* From snapshot args */
zfs_bookmark_phys_t ancestor_zb;
uint64_t *fromredactsnaps;
/* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
uint64_t numfromredactsnaps;
/* Stream params */
boolean_t is_clone;
boolean_t embedok;
boolean_t large_block_ok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t resumeobj;
uint64_t resumeoff;
uint64_t saved_guid;
zfs_bookmark_phys_t *redactbook;
/* Stream output params */
dmu_send_outparams_t *dso;
/* Stream progress params */
offset_t *off;
int outfd;
char saved_toname[MAXNAMELEN];
};
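/*
 * An illustrative combination: a send with large_block_ok, compressok and
 * embedok set but rawok clear (roughly "zfs send -L -c -e"), run against an
 * unencrypted dataset on a pool with the corresponding features active,
 * ends up with DMU_BACKUP_FEATURE_LARGE_BLOCKS, _EMBED_DATA, _COMPRESSED
 * and _LZ4 set in *featureflags; DMU_BACKUP_FEATURE_RAW is only ever set
 * for rawok sends of encrypted datasets.
 */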
static int
setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
uint64_t *featureflags)
{
dsl_dataset_t *to_ds = dspp->to_ds;
dsl_pool_t *dp = dspp->dp;
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS) {
uint64_t version;
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
return (SET_ERROR(EINVAL));
if (version >= ZPL_VERSION_SA)
*featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
#endif
/* raw sends imply large_block_ok */
if ((dspp->rawok || dspp->large_block_ok) &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
}
/* encrypted datasets will not have embedded blocks */
if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
*featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
}
/* raw send implies compressok */
if (dspp->compressok || dspp->rawok)
*featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
if (dspp->rawok && os->os_encrypted)
*featureflags |= DMU_BACKUP_FEATURE_RAW;
if ((*featureflags &
(DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
DMU_BACKUP_FEATURE_RAW)) != 0 &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_LZ4;
}
/*
* We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
* allow sending ZSTD compressed datasets to a receiver that does not
* support ZSTD
*/
if ((*featureflags &
(DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_ZSTD;
}
if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
*featureflags |= DMU_BACKUP_FEATURE_RESUMING;
}
if (dspp->redactbook != NULL) {
*featureflags |= DMU_BACKUP_FEATURE_REDACTED;
}
if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
}
return (0);
}
static dmu_replay_record_t *
create_begin_record(struct dmu_send_params *dspp, objset_t *os,
uint64_t featureflags)
{
dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
KM_SLEEP);
drr->drr_type = DRR_BEGIN;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
dsl_dataset_t *to_ds = dspp->to_ds;
drrb->drr_magic = DMU_BACKUP_MAGIC;
drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
drrb->drr_type = dmu_objset_type(os);
drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
if (dspp->is_clone)
drrb->drr_flags |= DRR_FLAG_CLONE;
if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
drrb->drr_flags |= DRR_FLAG_CI_DATA;
if (zfs_send_set_freerecords_bit)
drrb->drr_flags |= DRR_FLAG_FREERECORDS;
drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK;
if (dspp->savedok) {
drrb->drr_toguid = dspp->saved_guid;
strlcpy(drrb->drr_toname, dspp->saved_toname,
sizeof (drrb->drr_toname));
} else {
dsl_dataset_name(to_ds, drrb->drr_toname);
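/*
 * Sending from a live dataset rather than a snapshot; give
 * drr_toname a snapshot-style suffix.
 */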
if (!to_ds->ds_is_snapshot) {
(void) strlcat(drrb->drr_toname, "@--head--",
sizeof (drrb->drr_toname));
}
}
return (drr);
}
static void
setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
{
VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
to_arg->error_code = 0;
to_arg->cancel = B_FALSE;
to_arg->os = to_os;
to_arg->fromtxg = fromtxg;
to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
if (rawok)
to_arg->flags |= TRAVERSE_NO_DECRYPT;
if (zfs_send_corrupt_data)
to_arg->flags |= TRAVERSE_HARD;
to_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_from_thread(struct redact_list_thread_arg *from_arg,
redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
{
VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
from_arg->error_code = 0;
from_arg->cancel = B_FALSE;
from_arg->rl = from_rl;
from_arg->mark_redact = B_FALSE;
from_arg->num_blocks_visited = &dssp->dss_blocks;
/*
* If from_rl is null, redact_list_thread just enqueues an eos marker and
* returns success.
*/
(void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
{
if (dspp->redactbook == NULL)
return;
rlt_arg->cancel = B_FALSE;
VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
rlt_arg->error_code = 0;
rlt_arg->mark_redact = B_TRUE;
rlt_arg->rl = rl;
rlt_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_merge_thread(struct send_merge_thread_arg *smt_arg,
struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
objset_t *os)
{
VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
smt_arg->cancel = B_FALSE;
smt_arg->error = 0;
smt_arg->from_arg = from_arg;
smt_arg->to_arg = to_arg;
if (dspp->redactbook != NULL)
smt_arg->redact_arg = rlt_arg;
smt_arg->os = os;
(void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
TS_RUN, minclsyspri);
}
static void
setup_reader_thread(struct send_reader_thread_arg *srt_arg,
struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
uint64_t featureflags)
{
VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
srt_arg->smta = smt_arg;
srt_arg->issue_reads = !dspp->dso->dso_dryrun;
srt_arg->featureflags = featureflags;
(void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static int
setup_resume_points(struct dmu_send_params *dspp,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
struct redact_list_thread_arg *rlt_arg,
struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
redaction_list_t *redact_rl, nvlist_t *nvl)
{
(void) smt_arg;
dsl_dataset_t *to_ds = dspp->to_ds;
int err = 0;
uint64_t obj = 0;
uint64_t blkid = 0;
if (resuming) {
obj = dspp->resumeobj;
dmu_object_info_t to_doi;
err = dmu_object_info(os, obj, &to_doi);
if (err != 0)
return (err);
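/* Convert the resume byte offset into a block index in that object. */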
blkid = dspp->resumeoff / to_doi.doi_data_block_size;
}
/*
* If we're resuming a redacted send, we can skip to the appropriate
* point in the redaction bookmark by binary searching through it.
*/
if (redact_rl != NULL) {
SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
}
SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
/*
* Note: If the resume point is in an object whose
* blocksize is different in the from vs to snapshots,
* we will have divided by the "wrong" blocksize.
* However, in this case fromsnap's send_cb() will
* detect that the blocksize has changed and therefore
* ignore this object.
*
* If we're resuming a send from a redaction bookmark,
* we still cannot accidentally suggest blocks behind
* the to_ds. In addition, we know that any blocks in
* the object in the to_ds will have to be sent, since
* the size changed. Therefore, we can't cause any harm
* this way either.
*/
SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
}
if (resuming) {
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
}
return (0);
}
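/*
 * Allocate the progress-tracking structure for this stream and link it
 * onto the dataset's list of active send streams, where progress
 * reporting (e.g. ZFS_IOC_SEND_PROGRESS) can find it.
 */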
static dmu_sendstatus_t *
setup_send_progress(struct dmu_send_params *dspp)
{
dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
dssp->dss_outfd = dspp->outfd;
dssp->dss_off = dspp->off;
dssp->dss_proc = curproc;
mutex_enter(&dspp->to_ds->ds_sendstream_lock);
list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
mutex_exit(&dspp->to_ds->ds_sendstream_lock);
return (dssp);
}
/*
* Actually do the bulk of the work in a zfs send.
*
* The idea is that we want to do a send from ancestor_zb to to_ds. We also
* want to avoid sending any data that has been modified by all the datasets in
* redactsnaparr, and store the list of blocks that are redacted in this way in
* a bookmark named redactbook, created on the to_ds. We do this by creating
* several worker threads, whose function is described below.
*
* There are three cases.
* The first case is a redacted zfs send. In this case there are 5 threads.
* The first thread is the to_ds traversal thread: it calls dataset_traverse on
* the to_ds and finds all the blocks that have changed since ancestor_zb (if
* it's a full send, that's all blocks in the dataset). It then sends those
* blocks on to the send merge thread. The redact list thread takes the data
* from the redaction bookmark and sends those blocks on to the send merge
* thread. The send merge thread takes the data from the to_ds traversal
* thread, and combines it with the redaction records from the redact list
* thread. If a block appears in both the to_ds's data and the redaction data,
* the send merge thread will mark it as redacted and send it on to the prefetch
* thread. Otherwise, the send merge thread will send the block on to the
* prefetch thread unchanged. The prefetch thread will issue prefetch reads for
* any data that isn't redacted, and then send the data on to the main thread.
* The main thread behaves the same as in a normal send case, issuing demand
* reads for data blocks and sending out records over the network.
*
* The graphic below diagrams the flow of data in the case of a redacted zfs
* send. Each box represents a thread, and each line represents the flow of
* data.
*
* Records from the |
* redaction bookmark |
* +--------------------+ | +---------------------------+
* | | v | Send Merge Thread |
* | Redact List Thread +----------> Apply redaction marks to |
* | | | records as specified by |
* +--------------------+ | redaction ranges |
* +----^---------------+------+
* | | Merged data
* | |
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since |
* ancestor_zb +------------v----+
* | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The second case is an incremental send from a redaction bookmark. The to_ds
* traversal thread and the main thread behave the same as in the redacted
* send case. The new thread is the from bookmark traversal thread. It
* iterates over the redaction list in the redaction bookmark, and enqueues
* records for each block that was redacted in the original send. The send
* merge thread now has to merge the data from the two threads. For details
* about that process, see the header comment of send_merge_thread(). Any data
* it decides to send on will be prefetched by the prefetch thread. Note that
* you can perform a redacted send from a redaction bookmark; in that case,
* the data flow behaves very similarly to the flow in the redacted send case,
* except with the addition of the bookmark traversal thread iterating over the
* redaction bookmark. The send_merge_thread also has to take on the
* responsibility of merging the redact list thread's records, the bookmark
* traversal thread's records, and the to_ds records.
*
* +---------------------+
* | |
* | Redact List Thread +--------------+
* | | |
* +---------------------+ |
* Blocks in redaction list | Ranges modified by every secure snap
* of from bookmark | (or EOS if not redacted)
* |
* +---------------------+ | +----v----------------------+
* | bookmark Traversal | v | Send Merge Thread |
* | Thread (finds +---------> Merges bookmark, rlt, and |
* | candidate blocks) | | to_ds send records |
* +---------------------+ +----^---------------+------+
* | | Merged data
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since +------------v----+
* ancestor_zb | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The final case is a simple zfs full or incremental send. The to_ds traversal
* thread behaves the same as always. The redact list thread is never started.
* The send merge thread takes all the blocks that the to_ds traversal thread
* sends it, prefetches the data, and sends the blocks on to the main thread.
* The main thread sends the data over the wire.
*
* To keep performance acceptable, we want to prefetch the data in the worker
* threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
* feature built into traverse_dataset, the combining and deletion of records
* due to redaction and sends from redaction bookmarks mean that we could
* issue many unnecessary prefetches. As a result, we only prefetch data
* after we've determined that the record is not going to be redacted. To
* prevent the prefetching from getting too far ahead of the main thread, the
* blocking queues that are used for communication are capped not by the
* number of entries in the queue, but by the sum of the size of the
* prefetches associated with them. The limit on the amount of data that the
* thread can prefetch beyond what the main thread has reached is controlled
* by the global variable zfs_send_queue_length. In addition, to prevent poor
* performance in the beginning of a send, we also limit the distance ahead
* that the traversal threads can be. That distance is controlled by the
* zfs_send_no_prefetch_queue_length tunable.
*
* Note: Releases dp using the specified tag.
*/
static int
dmu_send_impl(struct dmu_send_params *dspp)
{
objset_t *os;
dmu_replay_record_t *drr;
dmu_sendstatus_t *dssp;
dmu_send_cookie_t dsc = {0};
int err;
uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
uint64_t featureflags = 0;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *rlt_arg;
struct send_merge_thread_arg *smt_arg;
struct send_reader_thread_arg *srt_arg;
struct send_range *range;
redaction_list_t *from_rl = NULL;
redaction_list_t *redact_rl = NULL;
boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
boolean_t book_resuming = resuming;
dsl_dataset_t *to_ds = dspp->to_ds;
zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
dsl_pool_t *dp = dspp->dp;
const void *tag = dspp->tag;
err = dmu_objset_from_ds(to_ds, &os);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If this is a non-raw send of an encrypted ds, we can ensure that
* the objset_phys_t is authenticated. This is safe because this is
* either a snapshot or we have owned the dataset, ensuring that
* it can't be modified.
*/
if (!dspp->rawok && os->os_encrypted &&
arc_is_unauthenticated(os->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform(os->os_phys_buf, os->os_spa,
&zb, B_FALSE);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
}
if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If we're doing a redacted send, hold the bookmark's redaction list.
*/
if (dspp->redactbook != NULL) {
err = dsl_redaction_list_hold_obj(dp,
dspp->redactbook->zbm_redaction_obj, FTAG,
&redact_rl);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
}
/*
* If we're sending from a redaction bookmark, hold the redaction list
* so that we can consider sending the redacted blocks.
*/
if (ancestor_zb->zbm_redaction_obj != 0) {
err = dsl_redaction_list_hold_obj(dp,
ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
if (err != 0) {
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, from_rl, FTAG);
}
dsl_dataset_long_hold(to_ds, FTAG);
from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
drr = create_begin_record(dspp, os, featureflags);
dssp = setup_send_progress(dspp);
dsc.dsc_drr = drr;
dsc.dsc_dso = dspp->dso;
dsc.dsc_os = os;
dsc.dsc_off = dspp->off;
dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
dsc.dsc_fromtxg = fromtxg;
dsc.dsc_pending_op = PENDING_NONE;
dsc.dsc_featureflags = featureflags;
dsc.dsc_resume_object = dspp->resumeobj;
dsc.dsc_resume_offset = dspp->resumeoff;
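/*
 * We're done with the pool hold taken by our caller; release it here,
 * as noted in the function comment above, before kicking off the
 * worker threads.
 */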
dsl_pool_rele(dp, tag);
void *payload = NULL;
size_t payload_len = 0;
nvlist_t *nvl = fnvlist_alloc();
/*
* If we're doing a redacted send, we include the snapshots we're
* redacted with respect to so that the target system knows what send
* streams can be correctly received on top of this dataset. If we're
* instead sending a redacted dataset, we include the snapshots that the
* dataset was created with respect to.
*/
if (dspp->redactbook != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
redact_rl->rl_phys->rlp_snaps,
redact_rl->rl_phys->rlp_num_snaps);
} else if (dsl_dataset_feature_is_active(to_ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t *tods_guids;
uint64_t length;
VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
length);
}
/*
* If we're sending from a redaction bookmark, then we should retrieve
* the guids of that bookmark so we can send them over the wire.
*/
if (from_rl != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
from_rl->rl_phys->rlp_snaps,
from_rl->rl_phys->rlp_num_snaps);
}
/*
* If the snapshot we're sending from is redacted, include the redaction
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
ASSERT3P(from_rl, ==, NULL);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
kmem_free(dspp->fromredactsnaps,
dspp->numfromredactsnaps * sizeof (uint64_t));
dspp->fromredactsnaps = NULL;
}
}
if (resuming || book_resuming) {
err = setup_resume_points(dspp, to_arg, from_arg,
rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
if (err != 0)
goto out;
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
uint64_t ivset_guid = (ancestor_zb != NULL) ?
ancestor_zb->zbm_ivset_guid : 0;
nvlist_t *keynvl = NULL;
ASSERT(os->os_encrypted);
err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
&keynvl);
if (err != 0) {
fnvlist_free(nvl);
goto out;
}
fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
fnvlist_free(keynvl);
}
if (!nvlist_empty(nvl)) {
payload = fnvlist_pack(nvl, &payload_len);
drr->drr_payloadlen = payload_len;
}
fnvlist_free(nvl);
err = dump_record(&dsc, payload, payload_len);
fnvlist_pack_free(payload, payload_len);
if (err != 0) {
err = dsc.dsc_err;
goto out;
}
setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
setup_from_thread(from_arg, from_rl, dssp);
setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
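/*
 * Main thread: drain the reader thread's queue, dumping each range to
 * the output stream until we see the end-of-stream marker, hit an
 * error, or get interrupted by a signal.
 */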
range = bqueue_dequeue(&srt_arg->q);
while (err == 0 && !range->eos_marker) {
err = do_dump(&dsc, range);
range = get_next_range(&srt_arg->q, range);
if (issig(JUSTLOOKING) && issig(FORREAL))
err = SET_ERROR(EINTR);
}
/*
* If we hit an error or are interrupted, cancel our worker threads and
* clear the queue of any pending records. The threads will pass the
* cancel up the tree of worker threads, and each one will clean up any
* pending records before exiting.
*/
if (err != 0) {
srt_arg->cancel = B_TRUE;
while (!range->eos_marker) {
range = get_next_range(&srt_arg->q, range);
}
}
range_free(range);
bqueue_destroy(&srt_arg->q);
bqueue_destroy(&smt_arg->q);
if (dspp->redactbook != NULL)
bqueue_destroy(&rlt_arg->q);
bqueue_destroy(&to_arg->q);
bqueue_destroy(&from_arg->q);
if (err == 0 && srt_arg->error != 0)
err = srt_arg->error;
if (err != 0)
goto out;
if (dsc.dsc_pending_op != PENDING_NONE)
if (dump_record(&dsc, NULL, 0) != 0)
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsc.dsc_err != 0)
err = dsc.dsc_err;
goto out;
}
/*
* Send the DRR_END record if this is not a saved stream.
* Otherwise, the omitted DRR_END record will signal to
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
memset(drr, 0, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
if (dump_record(&dsc, NULL, 0) != 0)
err = dsc.dsc_err;
}
out:
mutex_enter(&to_ds->ds_sendstream_lock);
list_remove(&to_ds->ds_sendstreams, dssp);
mutex_exit(&to_ds->ds_sendstream_lock);
VERIFY(err != 0 || (dsc.dsc_sent_begin &&
(dsc.dsc_sent_end || dspp->savedok)));
kmem_free(drr, sizeof (dmu_replay_record_t));
kmem_free(dssp, sizeof (dmu_sendstatus_t));
kmem_free(from_arg, sizeof (*from_arg));
kmem_free(to_arg, sizeof (*to_arg));
kmem_free(rlt_arg, sizeof (*rlt_arg));
kmem_free(smt_arg, sizeof (*smt_arg));
kmem_free(srt_arg, sizeof (*srt_arg));
dsl_dataset_long_rele(to_ds, FTAG);
if (from_rl != NULL) {
dsl_redaction_list_long_rele(from_rl, FTAG);
dsl_redaction_list_rele(from_rl, FTAG);
}
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err;
dsl_dataset_t *fromds;
ds_hold_flags_t dsflags;
struct dmu_send_params dspp = {0};
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.rawok = rawok;
dspp.savedok = savedok;
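/*
 * Raw sends ship the on-disk (possibly encrypted) data as-is, so the
 * dataset doesn't need to be held with its key loaded.
 */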
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
err = dsl_pool_hold(pool, FTAG, &dspp.dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (fromsnap != 0) {
err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
FTAG, &fromds);
if (err != 0) {
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
dspp.ancestor_zb.zbm_creation_txg =
dsl_dataset_phys(fromds)->ds_creation_txg;
dspp.ancestor_zb.zbm_creation_time =
dsl_dataset_phys(fromds)->ds_creation_time;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(dspp.dp->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&dspp.ancestor_zb.zbm_ivset_guid);
}
/* See dmu_send for the reasons behind this. */
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(fromds,
SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
memcpy(dspp.fromredactsnaps, fromredact, size);
}
boolean_t is_before =
dsl_dataset_is_before(dspp.to_ds, fromds, 0);
dspp.is_clone = (dspp.to_ds->ds_dir !=
fromds->ds_dir);
dsl_dataset_rele(fromds, FTAG);
if (!is_before) {
dsl_pool_rele(dspp.dp, FTAG);
err = SET_ERROR(EXDEV);
} else {
err = dmu_send_impl(&dspp);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
dsl_dataset_rele(dspp.to_ds, FTAG);
return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
const char *redactbook, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err = 0;
ds_hold_flags_t dsflags;
boolean_t owned = B_FALSE;
dsl_dataset_t *fromds = NULL;
zfs_bookmark_phys_t book = {0};
struct dmu_send_params dspp = {0};
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
dspp.tosnap = tosnap;
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.resumeobj = resumeobj;
dspp.resumeoff = resumeoff;
dspp.rawok = rawok;
dspp.savedok = savedok;
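/* An incremental source must name a snapshot ('@') or a bookmark ('#'). */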
if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
if (err != 0)
return (err);
if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
/*
* We are sending a filesystem or volume. Ensure
* that it doesn't change by owning the dataset.
*/
if (savedok) {
/*
* We are looking for the dataset that represents the
* partially received send stream. If this stream was
* received as a new snapshot of an existing dataset,
* this will be saved in a hidden clone named
* "<pool>/<dataset>/%recv". Otherwise, the stream
* will be saved in the live dataset itself. In
* either case we need to use dsl_dataset_own_force()
* because the stream is marked as inconsistent,
* which would normally make it unavailable to be
* owned.
*/
char *name = kmem_asprintf("%s/%s", tosnap,
recv_clone_name);
err = dsl_dataset_own_force(dspp.dp, name, dsflags,
FTAG, &dspp.to_ds);
if (err == ENOENT) {
err = dsl_dataset_own_force(dspp.dp, tosnap,
dsflags, FTAG, &dspp.to_ds);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1,
&dspp.saved_guid);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TONAME, 1,
sizeof (dspp.saved_toname),
dspp.saved_toname);
}
if (err != 0)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
kmem_strfree(name);
} else {
err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
FTAG, &dspp.to_ds);
}
owned = B_TRUE;
} else {
err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (redactbook != NULL) {
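/*
 * The redaction bookmark lives on the same filesystem as tosnap;
 * rewrite "pool/fs@snap" into "pool/fs#bookmark" and look it up.
 */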
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, tosnap, sizeof (path));
char *at = strchr(path, '@');
if (at == NULL) {
err = EINVAL;
} else {
(void) snprintf(at, sizeof (path) - (at - path), "#%s",
redactbook);
err = dsl_bookmark_lookup(dspp.dp, path,
NULL, &book);
dspp.redactbook = &book;
}
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
if (fromsnap != NULL) {
zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
int fsnamelen;
if (strpbrk(tosnap, "@#") != NULL)
fsnamelen = strpbrk(tosnap, "@#") - tosnap;
else
fsnamelen = strlen(tosnap);
/*
* If the fromsnap is in a different filesystem, then
* mark the send stream as a clone.
*/
if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
(fromsnap[fsnamelen] != '@' &&
fromsnap[fsnamelen] != '#')) {
dspp.is_clone = B_TRUE;
}
if (strchr(fromsnap, '@') != NULL) {
err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
&fromds);
if (err != 0) {
ASSERT3P(fromds, ==, NULL);
} else {
/*
* We need to make a deep copy of the redact
* snapshots of the from snapshot, because the
* array will be freed when we evict from_ds.
*/
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(
fromds, SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps =
NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size =
dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
memcpy(dspp.fromredactsnaps, fromredact,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,
0)) {
err = SET_ERROR(EXDEV);
} else {
zb->zbm_creation_txg =
dsl_dataset_phys(fromds)->
ds_creation_txg;
zb->zbm_creation_time =
dsl_dataset_phys(fromds)->
ds_creation_time;
zb->zbm_guid =
dsl_dataset_phys(fromds)->ds_guid;
zb->zbm_redaction_obj = 0;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(
dspp.dp->dp_meta_objset,
fromds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1,
&zb->zbm_ivset_guid);
}
}
dsl_dataset_rele(fromds, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
zb);
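/*
 * dsl_bookmark_lookup() returns EXDEV when the bookmark does not
 * precede tosnap; a redaction bookmark of tosnap itself is still a
 * valid incremental source, so tolerate that case.
 */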
if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
zb->zbm_guid ==
dsl_dataset_phys(dspp.to_ds)->ds_guid)
err = 0;
}
if (err == 0) {
/* dmu_send_impl will call dsl_pool_rele for us. */
err = dmu_send_impl(&dspp);
} else {
dsl_pool_rele(dspp.dp, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
int err = 0;
uint64_t size;
/*
* Assume that space (both on-disk and in-stream) is dominated by
* data. We will adjust for indirect blocks and the copies property,
* but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
*/
uint64_t recordsize;
uint64_t record_count;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
/* Assume all (uncompressed) blocks are recordsize. */
if (zfs_override_estimate_recordsize != 0) {
recordsize = zfs_override_estimate_recordsize;
} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
} else {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
}
if (err != 0)
return (err);
record_count = uncompressed / recordsize;
/*
* If we're estimating a send size for a compressed stream, use the
* compressed data size to estimate the stream size. Otherwise, use the
* uncompressed data size.
*/
size = stream_compressed ? compressed : uncompressed;
/*
* Subtract out approximate space used by indirect blocks.
* Assume most space is used by data blocks (non-indirect, non-dnode).
* Assume no ditto blocks or internal fragmentation.
*
* Therefore, space used by indirect blocks is sizeof(blkptr_t) per
* block.
*/
size -= record_count * sizeof (blkptr_t);
/* Add in the space for the record associated with each block. */
size += record_count * sizeof (dmu_replay_record_t);
*sizep = size;
return (0);
}
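/*
 * Rough worked example of the estimate above (illustrative only, not part
 * of the original code): with recordsize = 128K and 1 GiB of uncompressed
 * data, record_count = 2^30 / 2^17 = 8192. The estimate then subtracts
 * 8192 * sizeof (blkptr_t) (8192 * 128 bytes = 1 MiB of indirect-block
 * overhead) and adds 8192 dmu_replay_record_t headers, one per data block
 * in the stream.
 */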
int
dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
boolean_t saved, uint64_t *sizep)
{
int err;
dsl_dataset_t *ds = origds;
uint64_t uncomp, comp;
ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
ASSERT(fromds == NULL || frombook == NULL);
/*
* If this is a saved send we may actually be sending
* from the %recv clone used for resuming.
*/
if (saved) {
objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
uint64_t guid;
char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_name(origds, dsname);
(void) strcat(dsname, "/");
(void) strcat(dsname, recv_clone_name);
err = dsl_dataset_hold(origds->ds_dir->dd_pool,
dsname, FTAG, &ds);
if (err != ENOENT && err != 0) {
return (err);
} else if (err == ENOENT) {
ds = origds;
}
/* check that this dataset has partially received data */
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
}
/* tosnap must be a snapshot or the target of a saved send */
if (!ds->ds_is_snapshot && ds == origds)
return (SET_ERROR(EINVAL));
if (fromds != NULL) {
uint64_t used;
if (!fromds->ds_is_snapshot) {
err = SET_ERROR(EINVAL);
goto out;
}
if (!dsl_dataset_is_before(ds, fromds, 0)) {
err = SET_ERROR(EXDEV);
goto out;
}
err = dsl_dataset_space_written(fromds, ds, &used, &comp,
&uncomp);
if (err != 0)
goto out;
} else if (frombook != NULL) {
uint64_t used;
err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
&comp, &uncomp);
if (err != 0)
goto out;
} else {
uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
}
err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
stream_compressed, sizep);
/*
* Add the size of the BEGIN and END records to the estimate.
*/
*sizep += 2 * sizeof (dmu_replay_record_t);
out:
if (ds != origds)
dsl_dataset_rele(ds, FTAG);
return (err);
}
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
"Allow sending corrupt data");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW,
"Maximum send queue length");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
"Send unmodified spill blocks");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW,
"Maximum send queue length for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW,
"Send queue fill fraction");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW,
"Send queue fill fraction for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW,
"Override block size estimate with fixed size");
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 67fe1e2c9a0f..ef27dfd40af1 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -1,2579 +1,2579 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_allocate", KSTAT_DATA_UINT64 },
{ "dnode_reallocate", KSTAT_DATA_UINT64 },
{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
{ "dnode_move_special", KSTAT_DATA_UINT64 },
{ "dnode_move_handle", KSTAT_DATA_UINT64 },
{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
{ "dnode_move_active", KSTAT_DATA_UINT64 },
};
static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;
static dnode_phys_t dnode_phys_zero __maybe_unused;
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
#ifdef _KERNEL
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif /* _KERNEL */
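/*
 * AVL comparator for a dnode's dbuf tree: order by level, then block id.
 * DB_SEARCH entries are synthetic search keys and sort before real dbufs
 * with the same level and blkid; otherwise fall back to pointer order.
 */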
static int
dbuf_compare(const void *x1, const void *x2)
{
const dmu_buf_impl_t *d1 = x1;
const dmu_buf_impl_t *d2 = x2;
int cmp = TREE_CMP(d1->db_level, d2->db_level);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
if (likely(cmp))
return (cmp);
if (d1->db_state == DB_SEARCH) {
ASSERT3S(d2->db_state, !=, DB_SEARCH);
return (-1);
} else if (d2->db_state == DB_SEARCH) {
ASSERT3S(d1->db_state, !=, DB_SEARCH);
return (1);
}
return (TREE_PCMP(d1, d2));
}
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dnode_t *dn = arg;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
/*
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
memset(dn->dn_next_type, 0, sizeof (dn->dn_next_type));
memset(dn->dn_next_nblkptr, 0, sizeof (dn->dn_next_nblkptr));
memset(dn->dn_next_nlevels, 0, sizeof (dn->dn_next_nlevels));
memset(dn->dn_next_indblkshift, 0, sizeof (dn->dn_next_indblkshift));
memset(dn->dn_next_bonustype, 0, sizeof (dn->dn_next_bonustype));
memset(dn->dn_rm_spillblk, 0, sizeof (dn->dn_rm_spillblk));
memset(dn->dn_next_bonuslen, 0, sizeof (dn->dn_next_bonuslen));
memset(dn->dn_next_blksz, 0, sizeof (dn->dn_next_blksz));
memset(dn->dn_next_maxblkid, 0, sizeof (dn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
dn->dn_moved = 0;
return (0);
}
static void
dnode_dest(void *arg, void *unused)
{
(void) unused;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
cv_destroy(&dn->dn_nodnholds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
}
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
ASSERT3P(dn->dn_bonus, ==, NULL);
ASSERT(!dn->dn_have_spill);
ASSERT3P(dn->dn_zio, ==, NULL);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
avl_destroy(&dn->dn_dbufs);
}
void
dnode_init(void)
{
ASSERT(dnode_cache == NULL);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
kmem_cache_set_move(dnode_cache, dnode_move);
dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dnode_ksp != NULL) {
dnode_ksp->ks_data = &dnode_stats;
kstat_install(dnode_ksp);
}
}
void
dnode_fini(void)
{
if (dnode_ksp != NULL) {
kstat_delete(dnode_ksp);
dnode_ksp = NULL;
}
kmem_cache_destroy(dnode_cache);
dnode_cache = NULL;
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
int drop_struct_lock = FALSE;
ASSERT(dn->dn_phys);
ASSERT(dn->dn_objset);
ASSERT(dn->dn_handle->dnh_dnode == dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
return;
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
}
ASSERT3U(dn->dn_nlevels, <=, 30);
ASSERT(DMU_OT_IS_VALID(dn->dn_type));
ASSERT3U(dn->dn_nblkptr, >=, 1);
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
ASSERT3U(dn->dn_datablksz, ==,
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
dn->dn_bonuslen, <=, max_bonuslen);
for (i = 0; i < TXG_SIZE; i++) {
ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
}
}
if (dn->dn_phys->dn_type != DMU_OT_NONE)
ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
if (dn->dn_dbuf != NULL) {
ASSERT3P(dn->dn_phys, ==,
(dnode_phys_t *)dn->dn_dbuf->db.db_data +
(dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
uint64_t *buf64 = (void*)&dnp->dn_blkptr;
int i;
if (dnp->dn_type == DMU_OT_NONE) {
memset(dnp, 0, sizeof (dnode_phys_t));
return;
}
dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
dnp->dn_used = BSWAP_64(dnp->dn_used);
/*
* dn_nblkptr is only one byte, so it's OK to read it in either
* byte order. We can't read dn_bonuslen.
*/
ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
buf64[i] = BSWAP_64(buf64[i]);
/*
* OK to check dn_bonuslen for zero, because it won't matter if
* we have the wrong byte order. This is necessary because the
* dnode dnode is smaller than a regular dnode.
*/
if (dnp->dn_bonuslen != 0) {
dmu_object_byteswap_t byteswap;
ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
dmu_ot_byteswap[byteswap].ob_func(DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
/* Swap SPILL block if we have one */
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
}
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
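/*
 * Walk the buffer one dnode at a time; a large dnode occupies
 * dn_extra_slots additional DNODE_MIN_SIZE slots beyond the first.
 */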
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
dnode_byteswap(dnp);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE)
i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
}
}
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t));
if (newsize < dn->dn_bonuslen) {
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
memset(data_end, 0, diff);
}
dn->dn_bonuslen = newsize;
if (newsize == 0)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
else
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_have_spill = B_FALSE;
}
static void
dnode_setdblksz(dnode_t *dn, int size)
{
ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
dn->dn_datablksz = size;
dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn;
dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
dn->dn_moved = 0;
/*
* Defer setting dn_objset until the dnode is ready to be a candidate
* for the dnode_move() callback.
*/
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_handle = dnh;
dn->dn_phys = dnp;
if (dnp->dn_datablkszsec) {
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
} else {
dn->dn_datablksz = 0;
dn->dn_datablkszsec = 0;
dn->dn_datablkshift = 0;
}
dn->dn_indblkshift = dnp->dn_indblkshift;
dn->dn_nlevels = dnp->dn_nlevels;
dn->dn_type = dnp->dn_type;
dn->dn_nblkptr = dnp->dn_nblkptr;
dn->dn_checksum = dnp->dn_checksum;
dn->dn_compress = dnp->dn_compress;
dn->dn_bonustype = dnp->dn_bonustype;
dn->dn_bonuslen = dnp->dn_bonuslen;
dn->dn_num_slots = dnp->dn_extra_slots + 1;
dn->dn_maxblkid = dnp->dn_maxblkid;
dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
dn->dn_id_flags = 0;
dmu_zfetch_init(&dn->dn_zfetch, dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
mutex_enter(&os->os_lock);
/*
* Exclude special dnodes from os_dnodes so an empty os_dnodes
* signifies that the special dnodes have no references from
* their children (the entries in os_dnodes). This allows
* dnode_destroy() to easily determine if the last child has
* been removed and then complete eviction of the objset.
*/
if (!DMU_OBJECT_IS_SPECIAL(object))
list_insert_head(&os->os_dnodes, dn);
membar_producer();
/*
* Everything else must be valid before assigning dn_objset
* makes the dnode eligible for dnode_move().
*/
dn->dn_objset = os;
dnh->dnh_dnode = dn;
mutex_exit(&os->os_lock);
arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
return (dn);
}
/*
* Caller must be holding the dnode handle, which is released upon return.
*/
static void
dnode_destroy(dnode_t *dn)
{
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
list_remove(&os->os_dnodes, dn);
complete_os_eviction =
list_is_empty(&os->os_dnodes) &&
list_link_active(&os->os_evicting_node);
}
mutex_exit(&os->os_lock);
/* the dnode can no longer move, so we can release the handle */
if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
zrl_remove(&dn->dn_handle->dnh_zrlock);
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
}
dn->dn_zio = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
if (complete_os_eviction)
dmu_objset_evict_done(os);
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int i;
ASSERT3U(dn_slots, >, 0);
ASSERT3U(dn_slots << DNODE_SHIFT, <=,
spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (blocksize == 0)
blocksize = 1 << zfs_default_bs;
else
blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
if (ibs == 0)
ibs = zfs_default_ibs;
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
dn->dn_objset, (u_longlong_t)dn->dn_object,
(u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(memcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(dn->dn_maxblkid);
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
}
dn->dn_type = ot;
dnode_setdblksz(dn, blocksize);
dn->dn_indblkshift = ibs;
dn->dn_nlevels = 1;
dn->dn_num_slots = dn_slots;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
dn->dn_nblkptr = 1;
else {
dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
}
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_dirty_txg = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
dnode_setdirty(dn, tx);
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx)
{
int nblkptr;
ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
ASSERT0(blocksize % SPA_MINBLOCKSIZE);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
ASSERT(tx->tx_txg != 0);
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
dnode_free_interior_slots(dn);
DNODE_STAT_BUMP(dnode_reallocate);
/* clean up any unreferenced dbufs */
dnode_evict_dbufs(dn);
dn->dn_id_flags = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
/* change blocksize */
ASSERT0(dn->dn_maxblkid);
ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
dnode_block_freed(dn, 0));
dnode_setdblksz(dn, blocksize);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
nblkptr = 1;
else
nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
if (dn->dn_bonustype != bonustype)
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
if (dn->dn_nblkptr != nblkptr)
dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
dbuf_rm_spill(dn, tx);
dnode_rm_spill(dn, tx);
}
rw_exit(&dn->dn_struct_rwlock);
/* change type */
dn->dn_type = ot;
/* change bonus size and type */
mutex_enter(&dn->dn_mtx);
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_num_slots = dn_slots;
dn->dn_nblkptr = nblkptr;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
/* fix up the bonus db_size */
if (dn->dn_bonus) {
dn->dn_bonus->db.db_size =
DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
}
dn->dn_allocated_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
}
#ifdef _KERNEL
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
/* Copy fields. */
ndn->dn_objset = odn->dn_objset;
ndn->dn_object = odn->dn_object;
ndn->dn_dbuf = odn->dn_dbuf;
ndn->dn_handle = odn->dn_handle;
ndn->dn_phys = odn->dn_phys;
ndn->dn_type = odn->dn_type;
ndn->dn_bonuslen = odn->dn_bonuslen;
ndn->dn_bonustype = odn->dn_bonustype;
ndn->dn_nblkptr = odn->dn_nblkptr;
ndn->dn_checksum = odn->dn_checksum;
ndn->dn_compress = odn->dn_compress;
ndn->dn_nlevels = odn->dn_nlevels;
ndn->dn_indblkshift = odn->dn_indblkshift;
ndn->dn_datablkshift = odn->dn_datablkshift;
ndn->dn_datablkszsec = odn->dn_datablkszsec;
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
memcpy(ndn->dn_next_type, odn->dn_next_type,
sizeof (odn->dn_next_type));
memcpy(ndn->dn_next_nblkptr, odn->dn_next_nblkptr,
sizeof (odn->dn_next_nblkptr));
memcpy(ndn->dn_next_nlevels, odn->dn_next_nlevels,
sizeof (odn->dn_next_nlevels));
memcpy(ndn->dn_next_indblkshift, odn->dn_next_indblkshift,
sizeof (odn->dn_next_indblkshift));
memcpy(ndn->dn_next_bonustype, odn->dn_next_bonustype,
sizeof (odn->dn_next_bonustype));
memcpy(ndn->dn_rm_spillblk, odn->dn_rm_spillblk,
sizeof (odn->dn_rm_spillblk));
memcpy(ndn->dn_next_bonuslen, odn->dn_next_bonuslen,
sizeof (odn->dn_next_bonuslen));
memcpy(ndn->dn_next_blksz, odn->dn_next_blksz,
sizeof (odn->dn_next_blksz));
memcpy(ndn->dn_next_maxblkid, odn->dn_next_maxblkid,
sizeof (odn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
memcpy(ndn->dn_free_ranges, odn->dn_free_ranges,
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
ndn->dn_oldused = odn->dn_oldused;
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
dmu_zfetch_init(&ndn->dn_zfetch, ndn);
/*
* Update back pointers. Updating the handle fixes the back pointer of
* every descendant dbuf as well as the bonus dbuf.
*/
ASSERT(ndn->dn_handle->dnh_dnode == odn);
ndn->dn_handle->dnh_dnode = ndn;
/*
* Invalidate the original dnode by clearing all of its back pointers.
*/
odn->dn_dbuf = NULL;
odn->dn_handle = NULL;
avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
odn->dn_bonus = NULL;
dmu_zfetch_fini(&odn->dn_zfetch);
/*
* Set the low bit of the objset pointer to ensure that dnode_move()
* recognizes the dnode as invalid in any subsequent callback.
*/
POINTER_INVALIDATE(&odn->dn_objset);
/*
* Satisfy the destructor.
*/
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
odn->dn_free_ranges[i] = NULL;
odn->dn_next_nlevels[i] = 0;
odn->dn_next_indblkshift[i] = 0;
odn->dn_next_bonustype[i] = 0;
odn->dn_rm_spillblk[i] = 0;
odn->dn_next_bonuslen[i] = 0;
odn->dn_next_blksz[i] = 0;
}
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
odn->dn_dirty_txg = 0;
odn->dn_dirtyctx = 0;
odn->dn_dirtyctx_firstset = NULL;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
/*
* Mark the dnode.
*/
ndn->dn_moved = 1;
odn->dn_moved = (uint8_t)-1;
}
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
dnode_t *odn = buf, *ndn = newbuf;
objset_t *os;
int64_t refcount;
uint32_t dbufs;
/*
* The dnode is on the objset's list of known dnodes if the objset
* pointer is valid. We set the low bit of the objset pointer when
* freeing the dnode to invalidate it, and the memory patterns written
* by kmem (baddcafe and deadbeef) set at least one of the two low bits.
* A newly created dnode sets the objset pointer last of all to indicate
* that the dnode is known and in a valid state to be moved by this
* function.
*/
os = odn->dn_objset;
if (!POINTER_IS_VALID(os)) {
DNODE_STAT_BUMP(dnode_move_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* Ensure that the objset does not go away during the move.
*/
rw_enter(&os_lock, RW_WRITER);
if (os != odn->dn_objset) {
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* If the dnode is still valid, then so is the objset. We know that no
* valid objset can be freed while we hold os_lock, so we can safely
* ensure that the objset remains in use.
*/
mutex_enter(&os->os_lock);
/*
* Recheck the objset pointer in case the dnode was removed just before
* acquiring the lock.
*/
if (os != odn->dn_objset) {
mutex_exit(&os->os_lock);
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* At this point we know that as long as we hold os->os_lock, the dnode
* cannot be freed and fields within the dnode can be safely accessed.
* The objset listing this dnode cannot go away as long as this dnode is
* on its list.
*/
rw_exit(&os_lock);
if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_special);
return (KMEM_CBRC_NO);
}
ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
/*
* Lock the dnode handle to prevent the dnode from obtaining any new
* holds. This also prevents the descendant dbufs and the bonus dbuf
* from accessing the dnode, so that we can discount their holds. The
* handle is safe to access because we know that while the dnode cannot
* go away, neither can its handle. Once we hold dnh_zrlock, we can
* safely move any dnode referenced only by dbufs.
*/
if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_handle);
return (KMEM_CBRC_LATER);
}
/*
* Ensure a consistent view of the dnode's holds and the dnode's dbufs.
* We need to guarantee that there is a hold for every dbuf in order to
* determine whether the dnode is actively referenced. Falsely matching
* a dbuf to an active hold would lead to an unsafe move. It's possible
* that a thread already having an active dnode hold is about to add a
* dbuf, and we can't compare hold and dbuf counts while the add is in
* progress.
*/
if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_rwlock);
return (KMEM_CBRC_LATER);
}
/*
* A dbuf may be removed (evicted) without an active dnode hold. In that
* case, the dbuf count is decremented under the handle lock before the
* dbuf's hold is released. This order ensures that if we count the hold
* after the dbuf is removed but before its hold is released, we will
* treat the unmatched hold as active and exit safely. If we count the
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = DN_DBUFS_COUNT(odn);
/* We can't have more dbufs than dnode holds. */
ASSERT3U(dbufs, <=, refcount);
DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
uint32_t, dbufs);
if (refcount > dbufs) {
rw_exit(&odn->dn_struct_rwlock);
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_active);
return (KMEM_CBRC_LATER);
}
rw_exit(&odn->dn_struct_rwlock);
/*
* At this point we know that anyone with a hold on the dnode is not
* actively referencing it. The dnode is known and in a valid state to
* move. We're holding the locks needed to execute the critical section.
*/
dnode_move_impl(odn, ndn);
list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
return (KMEM_CBRC_YES);
}
#endif /* _KERNEL */
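/*
 * A minimal user-space sketch of the low-bit invalidation trick described in
 * dnode_move() above: a naturally aligned pointer has its low bits clear, so
 * setting the lowest bit marks the old copy invalid without a separate flag,
 * and kmem debug patterns (0xbaddcafe/0xdeadbeef) also fail the same check.
 * The EX_* names are simplified stand-ins, not the kernel macros.
 */
#include <assert.h>
#include <stdint.h>

#define	EX_POINTER_INVALIDATE(pp)	(*(pp) = (void *)((uintptr_t)*(pp) | 1))
#define	EX_POINTER_IS_VALID(p)		(((uintptr_t)(p) & 3) == 0)

static void
example_pointer_invalidate(void)
{
	static int object;			/* stands in for an objset_t */
	void *back_pointer = &object;

	assert(EX_POINTER_IS_VALID(back_pointer));
	EX_POINTER_INVALIDATE(&back_pointer);
	assert(!EX_POINTER_IS_VALID(back_pointer));

	/* Poison patterns read as invalid too. */
	assert(!EX_POINTER_IS_VALID((void *)(uintptr_t)0xdeadbeef));
	assert(!EX_POINTER_IS_VALID((void *)(uintptr_t)0xbaddcafe));
}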
static void
dnode_slots_hold(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
zrl_add(&dnh->dnh_zrlock);
}
}
static void
dnode_slots_rele(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (zrl_is_locked(&dnh->dnh_zrlock))
zrl_exit(&dnh->dnh_zrlock);
else
zrl_remove(&dnh->dnh_zrlock);
}
}
static int
dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (!zrl_tryenter(&dnh->dnh_zrlock)) {
for (int j = idx; j < i; j++) {
dnh = &children->dnc_children[j];
zrl_exit(&dnh->dnh_zrlock);
}
return (0);
}
}
return (1);
}
static void
dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnh->dnh_dnode = ptr;
}
}
static boolean_t
dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
/*
* If all dnode slots are either already free or
 * evictable, return B_TRUE.
*/
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnode_t *dn = dnh->dnh_dnode;
if (dn == DN_SLOT_FREE) {
continue;
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
zfs_refcount_is_zero(&dn->dn_holds) &&
!DNODE_IS_DIRTY(dn));
mutex_exit(&dn->dn_mtx);
if (!can_free)
return (B_FALSE);
else
continue;
} else {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void
dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
dnode_destroy(dnh->dnh_dnode);
dnh->dnh_dnode = DN_SLOT_FREE;
}
}
}
void
dnode_free_interior_slots(dnode_t *dn)
{
dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
int idx = (dn->dn_object & (epb - 1)) + 1;
int slots = dn->dn_num_slots - 1;
if (slots == 0)
return;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
while (!dnode_slots_tryenter(children, idx, slots)) {
DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
- cond_resched();
+ kpreempt(KPREEMPT_SYNC);
}
dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
dnode_slots_rele(children, idx, slots);
}
void
dnode_special_close(dnode_handle_t *dnh)
{
dnode_t *dn = dnh->dnh_dnode;
/*
* Ensure dnode_rele_and_unlock() has released dn_mtx, after final
* zfs_refcount_remove()
*/
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_count(&dn->dn_holds) > 0)
cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
zrl_add(&dnh->dnh_zrlock);
dnode_destroy(dn); /* implicit zrl_remove() */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = NULL;
}
void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
dnode_handle_t *dnh)
{
dnode_t *dn;
zrl_init(&dnh->dnh_zrlock);
VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
dn = dnode_create(os, dnp, NULL, object, dnh);
DNODE_VERIFY(dn);
zrl_exit(&dnh->dnh_zrlock);
}
static void
dnode_buf_evict_async(void *dbu)
{
dnode_children_t *dnc = dbu;
DNODE_STAT_BUMP(dnode_buf_evict);
for (int i = 0; i < dnc->dnc_count; i++) {
dnode_handle_t *dnh = &dnc->dnc_children[i];
dnode_t *dn;
/*
* The dnode handle lock guards against the dnode moving to
* another valid address, so there is no need here to guard
* against changes to or from NULL.
*/
if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
continue;
}
zrl_add(&dnh->dnh_zrlock);
dn = dnh->dnh_dnode;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
}
kmem_free(dnc, sizeof (dnode_children_t) +
dnc->dnc_count * sizeof (dnode_handle_t));
}
/*
* When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
* to ensure the hole at the specified object offset is large enough to
* hold the dnode being created. The slots parameter is also used to ensure
* a dnode does not span multiple dnode blocks. In both of these cases, if
* a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
* are only possible when using DNODE_MUST_BE_FREE.
*
* If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
* dnode_hold_impl() will check if the requested dnode is already consumed
 * as an extra dnode slot by a large dnode, in which case it returns
* ENOENT.
*
* If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
 * return whether the hold would succeed or not. tag and dnp should be set to
* NULL in this case.
*
* errors:
* EINVAL - Invalid object number or flags.
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
* EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
* - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
* - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
* ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
* - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
* EIO - I/O error when reading the meta dnode dbuf.
*
* succeeds even for free dnodes.
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
const void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
int type;
uint64_t blk;
dnode_t *mdn, *dn;
dmu_buf_impl_t *db;
dnode_children_t *dnc;
dnode_phys_t *dn_block;
dnode_handle_t *dnh;
ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
/*
* If you are holding the spa config lock as writer, you shouldn't
* be asking the DMU to do *anything* unless it's the root pool
* which may require us to read from the root filesystem while
* holding some (not all) of the locks as writer.
*/
ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
object == DMU_PROJECTUSED_OBJECT) {
if (object == DMU_USERUSED_OBJECT)
dn = DMU_USERUSED_DNODE(os);
else if (object == DMU_GROUPUSED_OBJECT)
dn = DMU_GROUPUSED_DNODE(os);
else
dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
/* Don't actually hold if dry run, just return 0 */
if (!(flag & DNODE_DRY_RUN)) {
(void) zfs_refcount_add(&dn->dn_holds, tag);
*dnp = dn;
}
return (0);
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
DNODE_VERIFY(mdn);
if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
rw_enter(&mdn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
db = dbuf_hold(mdn, blk, FTAG);
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL) {
DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
return (SET_ERROR(EIO));
}
/*
* We do not need to decrypt to read the dnode so it doesn't matter
* if we get the encrypted or decrypted version.
*/
err = dbuf_read(db, NULL, DB_RF_CANFAIL |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (err) {
DNODE_STAT_BUMP(dnode_hold_dbuf_read);
dbuf_rele(db, FTAG);
return (err);
}
ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
epb = db->db.db_size >> DNODE_SHIFT;
idx = object & (epb - 1);
dn_block = (dnode_phys_t *)db->db.db_data;
ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
dnc = dmu_buf_get_user(&db->db);
dnh = NULL;
if (dnc == NULL) {
dnode_children_t *winner;
int skip = 0;
dnc = kmem_zalloc(sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t), KM_SLEEP);
dnc->dnc_count = epb;
dnh = &dnc->dnc_children[0];
/* Initialize dnode slot status from dnode_phys_t */
for (int i = 0; i < epb; i++) {
zrl_init(&dnh[i].dnh_zrlock);
if (skip) {
skip--;
continue;
}
if (dn_block[i].dn_type != DMU_OT_NONE) {
int interior = dn_block[i].dn_extra_slots;
dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
dnode_set_slots(dnc, i + 1, interior,
DN_SLOT_INTERIOR);
skip = interior;
} else {
dnh[i].dnh_dnode = DN_SLOT_FREE;
skip = 0;
}
}
dmu_buf_init_user(&dnc->dnc_dbu, NULL,
dnode_buf_evict_async, NULL);
winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
if (winner != NULL) {
for (int i = 0; i < epb; i++)
zrl_destroy(&dnh[i].dnh_zrlock);
kmem_free(dnc, sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t));
dnc = winner;
}
}
ASSERT(dnc->dnc_count == epb);
if (flag & DNODE_MUST_BE_ALLOCATED) {
slots = 1;
dnode_slots_hold(dnc, idx, slots);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
DNODE_STAT_BUMP(dnode_hold_alloc_interior);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
DNODE_STAT_BUMP(dnode_hold_alloc_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
} else {
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
- cond_resched();
+ kpreempt(KPREEMPT_SYNC);
}
/*
* Someone else won the race and called dnode_create()
* after we checked DN_SLOT_IS_PTR() above but before
* we acquired the lock.
*/
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
DNODE_STAT_BUMP(dnode_hold_alloc_hits);
} else if (flag & DNODE_MUST_BE_FREE) {
if (idx + slots - 1 >= DNODES_PER_BLOCK) {
DNODE_STAT_BUMP(dnode_hold_free_overflow);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_hold(dnc, idx, slots);
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
- cond_resched();
+ kpreempt(KPREEMPT_SYNC);
}
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
/*
* Allocated but otherwise free dnodes which would
 * be in the interior of a multi-slot dnode need
* to be freed. Single slot dnodes can be safely
* re-purposed as a performance optimization.
*/
if (slots > 1)
dnode_reclaim_slots(dnc, idx + 1, slots - 1);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
mutex_enter(&dn->dn_mtx);
if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
DNODE_STAT_BUMP(dnode_hold_free_hits);
} else {
dbuf_rele(db, FTAG);
return (SET_ERROR(EINVAL));
}
ASSERT0(dn->dn_free_txg);
if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);
mutex_exit(&dn->dn_mtx);
/* Now we can rely on the hold to prevent the dnode from moving. */
dnode_slots_rele(dnc, idx, slots);
DNODE_VERIFY(dn);
ASSERT3P(dnp, !=, NULL);
ASSERT3P(dn->dn_dbuf, ==, db);
ASSERT3U(dn->dn_object, ==, object);
dbuf_rele(db, FTAG);
*dnp = dn;
return (0);
}
/*
 * Return a held dnode (in *dnp) if the object is allocated, or an error if not.
*/
int
dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
}
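/*
 * A minimal sketch of the hold/release discipline implied by dnode_hold()
 * and dnode_rele(): every successful hold is paired with a release under the
 * same tag, and dnode fields are only dereferenced while the hold is live.
 * The function name and its use of dn_type are hypothetical illustrations,
 * not part of the interface above.
 */
static int
example_read_dnode_type(objset_t *os, uint64_t object, dmu_object_type_t *typep)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);	/* e.g. ENOENT if the object is not allocated */

	*typep = dn->dn_type;	/* safe to read while the hold is live */

	dnode_rele(dn, FTAG);
	return (0);
}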
/*
* Can only add a reference if there is already at least one
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
boolean_t
dnode_add_ref(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
void
dnode_rele(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
}
void
dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;
refs = zfs_refcount_remove(&dn->dn_holds, tag);
if (refs == 0)
cv_broadcast(&dn->dn_nodnholds);
mutex_exit(&dn->dn_mtx);
/* dnode could get destroyed at this point, so don't use it anymore */
/*
* It's unsafe to release the last hold on a dnode by dnode_rele() or
* indirectly by dbuf_rele() while relying on the dnode handle to
* prevent the dnode from moving, since releasing the last hold could
* result in the dnode's parent dbuf evicting its dnode handles. For
* that reason anyone calling dnode_rele() or dbuf_rele() without some
* other direct or indirect hold on the dnode must first drop the dnode
* handle.
*/
#ifdef ZFS_DEBUG
ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
#endif
/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
if (refs == 0 && db != NULL) {
/*
* Another thread could add a hold to the dnode handle in
* dnode_hold_impl() while holding the parent dbuf. Since the
* hold on the parent dbuf prevents the handle from being
* destroyed, the hold on the handle is OK. We can't yet assert
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, dnh, evicting);
}
}
/*
* Test whether we can create a dnode at the specified location.
*/
int
dnode_try_claim(objset_t *os, uint64_t object, int slots)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
slots, NULL, NULL));
}
/*
* Checks if the dnode contains any uncommitted dirty records.
*/
boolean_t
dnode_is_dirty(dnode_t *dn)
{
mutex_enter(&dn->dn_mtx);
for (int i = 0; i < TXG_SIZE; i++) {
if (multilist_link_active(&dn->dn_dirty_link[i])) {
mutex_exit(&dn->dn_mtx);
return (B_TRUE);
}
}
mutex_exit(&dn->dn_mtx);
return (B_FALSE);
}
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
uint64_t txg = tx->tx_txg;
if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
dsl_dataset_dirty(os->os_dsl_dataset, tx);
return;
}
DNODE_VERIFY(dn);
#ifdef ZFS_DEBUG
mutex_enter(&dn->dn_mtx);
ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
mutex_exit(&dn->dn_mtx);
#endif
/*
* Determine old uid/gid when necessary
*/
dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
multilist_t *dirtylist = &os->os_dirty_dnodes[txg & TXG_MASK];
multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
/*
* If we are already marked dirty, we're done.
*/
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
multilist_sublist_unlock(mls);
return;
}
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
(u_longlong_t)dn->dn_object, (u_longlong_t)txg);
multilist_sublist_insert_head(mls, dn);
multilist_sublist_unlock(mls);
/*
* The dnode maintains a hold on its containing dbuf as
* long as there are holds on it. Each instantiated child
* dbuf maintains a hold on the dnode. When the last child
* drops its hold, the dnode will drop its hold on the
* containing dbuf. We add a "dirty hold" here so that the
* dnode will hang around after we finish processing its
* children.
*/
VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
(void) dbuf_dirty(dn->dn_dbuf, tx);
dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
mutex_exit(&dn->dn_mtx);
return;
}
dn->dn_free_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
}
/*
* Try to change the block size for the indicated dnode. This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
*/
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int err;
ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (size == 0)
size = SPA_MINBLOCKSIZE;
else
size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
if (ibs == dn->dn_indblkshift)
ibs = 0;
if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
return (0);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* Check for any allocated blocks beyond the first */
if (dn->dn_maxblkid != 0)
goto fail;
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL;
db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_exit(&dn->dn_dbufs_mtx);
goto fail;
}
}
mutex_exit(&dn->dn_dbufs_mtx);
if (ibs && dn->dn_nlevels != 1)
goto fail;
/* resize the old block */
err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
if (err == 0) {
dbuf_new_size(db, size, tx);
} else if (err != ENOENT) {
goto fail;
}
dnode_setdblksz(dn, size);
dnode_setdirty(dn, tx);
dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
if (ibs) {
dn->dn_indblkshift = ibs;
dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
}
/* release after we have fixed the blocksize in the dnode */
if (db)
dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
return (0);
fail:
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(ENOTSUP));
}
static void
dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
{
uint64_t txgoff = tx->tx_txg & TXG_MASK;
int old_nlevels = dn->dn_nlevels;
dmu_buf_impl_t *db;
list_t *list;
dbuf_dirty_record_t *new, *dr, *dr_next;
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT3U(new_nlevels, >, dn->dn_nlevels);
dn->dn_nlevels = new_nlevels;
ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
dn->dn_next_nlevels[txgoff] = new_nlevels;
/* dirty the left indirects */
db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
ASSERT(db != NULL);
new = dbuf_dirty(db, tx);
dbuf_rele(db, FTAG);
/* transfer the dirty records to the new indirect */
mutex_enter(&dn->dn_mtx);
mutex_enter(&new->dt.di.dr_mtx);
list = &dn->dn_dirty_records[txgoff];
for (dr = list_head(list); dr; dr = dr_next) {
dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
IMPLY(dr->dr_dbuf == NULL, old_nlevels == 1);
if (dr->dr_dbuf == NULL ||
(dr->dr_dbuf->db_level == old_nlevels - 1 &&
dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID)) {
list_remove(&dn->dn_dirty_records[txgoff], dr);
list_insert_tail(&new->dt.di.dr_children, dr);
dr->dr_parent = new;
}
}
mutex_exit(&new->dt.di.dr_mtx);
mutex_exit(&dn->dn_mtx);
}
int
dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
{
int ret = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_nlevels == nlevels) {
ret = 0;
goto out;
} else if (nlevels < dn->dn_nlevels) {
ret = SET_ERROR(EINVAL);
goto out;
}
dnode_set_nlevels_impl(dn, nlevels, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
return (ret);
}
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
boolean_t force)
{
int epbs, new_nlevels;
uint64_t sz;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(have_read ?
RW_READ_HELD(&dn->dn_struct_rwlock) :
RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* if we have a read-lock, check to see if we need to do any work
* before upgrading to a write-lock.
*/
if (have_read) {
if (blkid <= dn->dn_maxblkid)
return;
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
}
/*
* Raw sends (indicated by the force flag) require that we take the
* given blkid even if the value is lower than the current value.
*/
if (!force && blkid <= dn->dn_maxblkid)
goto out;
/*
* We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
* to indicate that this field is set. This allows us to set the
* maxblkid to 0 on an existing object in dnode_sync().
*/
dn->dn_maxblkid = blkid;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
blkid | DMU_NEXT_MAXBLKID_SET;
/*
* Compute the number of levels necessary to support the new maxblkid.
* Raw sends will ensure nlevels is set correctly for us.
*/
new_nlevels = 1;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (sz = dn->dn_nblkptr;
sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
new_nlevels++;
ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
if (!force) {
if (new_nlevels > dn->dn_nlevels)
dnode_set_nlevels_impl(dn, new_nlevels, tx);
} else {
ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
}
out:
if (have_read)
rw_downgrade(&dn->dn_struct_rwlock);
}
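/*
 * A user-space sketch of two pieces of arithmetic used in dnode_new_blkid()
 * above: marking "this field was set" in the otherwise unused top bit of the
 * 64-bit per-txg maxblkid, and counting how many levels of indirection are
 * needed to address a given block id.  EX_NEXT_MAXBLKID_SET and the sample
 * nblkptr/epbs values are stand-ins, not the kernel definitions.
 */
#include <assert.h>
#include <stdint.h>

#define	EX_NEXT_MAXBLKID_SET	(1ULL << 63)

static int
example_levels_needed(uint64_t blkid, uint64_t nblkptr, int epbs)
{
	int nlevels = 1;
	uint64_t sz;

	/* Same loop shape as above: widen by 2^epbs pointers per level. */
	for (sz = nblkptr; sz <= blkid && sz >= nblkptr; sz <<= epbs)
		nlevels++;
	return (nlevels);
}

static void
example_maxblkid_encoding(void)
{
	uint64_t next_maxblkid = 0 | EX_NEXT_MAXBLKID_SET;	/* "set to 0" */

	assert(next_maxblkid & EX_NEXT_MAXBLKID_SET);		/* field was set */
	assert((next_maxblkid & ~EX_NEXT_MAXBLKID_SET) == 0);	/* value is 0 */

	/* 3 block pointers, 128 pointers per indirect block (epbs = 7). */
	assert(example_levels_needed(2, 3, 7) == 1);	/* fits in the dnode itself */
	assert(example_levels_needed(500, 3, 7) == 3);	/* needs two indirect levels */
}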
static void
dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
if (db != NULL) {
dmu_buf_will_dirty(&db->db, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Dirty all the in-core level-1 dbufs in the range specified by start_blkid
* and end_blkid.
*/
static void
dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db;
avl_index_t where;
db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
for (;;) {
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
if (db == NULL || db->db_level != 1 ||
db->db_blkid >= end_blkid) {
break;
}
/*
 * Set up the next blkid we want to search for.
*/
db_search->db_blkid = db->db_blkid + 1;
ASSERT3U(db->db_blkid, >=, start_blkid);
/*
* If the dbuf transitions to DB_EVICTING while we're trying
* to dirty it, then we will be unable to discover it in
* the dbuf hash table. This will result in a call to
* dbuf_create() which needs to acquire the dn_dbufs_mtx
* lock. To avoid a deadlock, we drop the lock before
* dirtying the level-1 dbuf.
*/
mutex_exit(&dn->dn_dbufs_mtx);
dnode_dirty_l1(dn, db->db_blkid, tx);
mutex_enter(&dn->dn_dbufs_mtx);
}
#ifdef ZFS_DEBUG
/*
* Walk all the in-core level-1 dbufs and verify they have been dirtied.
*/
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_level != 1 || db->db_blkid >= end_blkid)
break;
if (db->db_state != DB_EVICTING)
ASSERT(db->db_dirtycnt > 0);
}
#endif
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
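/*
 * A minimal user-space sketch of the lock-dropping pattern used in
 * dnode_dirty_l1range() above: when the per-element work may itself need the
 * list lock, remember a resume key, drop the lock, do the work, then
 * re-acquire and re-search from the key instead of trusting a stale
 * iterator.  ex_find_first_at_or_after() and ex_process() are hypothetical
 * helpers declared only for illustration.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

extern uint64_t ex_find_first_at_or_after(uint64_t key);	/* UINT64_MAX if none */
extern void ex_process(uint64_t key);				/* may take ex_lock */

static void
example_iterate(uint64_t start, uint64_t end)
{
	uint64_t key = start;

	pthread_mutex_lock(&ex_lock);
	for (;;) {
		uint64_t cur = ex_find_first_at_or_after(key);

		if (cur == UINT64_MAX || cur >= end)
			break;
		key = cur + 1;			/* resume point for the re-search */
		pthread_mutex_unlock(&ex_lock);
		ex_process(cur);		/* safe to block or take ex_lock here */
		pthread_mutex_lock(&ex_lock);
	}
	pthread_mutex_unlock(&ex_lock);
}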
void
dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
{
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
* initialize the objset.
*/
if (dn->dn_dirtyctx == DN_UNDIRTIED) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
if (ds != NULL) {
rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
}
if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
if (dmu_tx_is_syncing(tx))
dn->dn_dirtyctx = DN_DIRTY_SYNC;
else
dn->dn_dirtyctx = DN_DIRTY_OPEN;
dn->dn_dirtyctx_firstset = tag;
}
if (ds != NULL) {
rrw_exit(&ds->ds_bp_rwlock, tag);
}
}
}
static void
dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int res;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off), TRUE, FALSE,
FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
db_lock_type_t dblt;
boolean_t dirty;
dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
/* don't dirty if not on disk and not dirty */
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, dblt, FTAG);
if (dirty) {
caddr_t data;
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
memset(data + blkoff, 0, len);
}
dbuf_rele(db, FTAG);
}
}
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
uint64_t blkoff, blkid, nblks;
int blksz, blkshift, head, tail;
int trunc = FALSE;
int epbs;
blksz = dn->dn_datablksz;
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
/*
* First, block align the region to free:
*/
if (ISP2(blksz)) {
head = P2NPHASE(off, blksz);
blkoff = P2PHASE(off, blksz);
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
ASSERT(dn->dn_maxblkid == 0);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
*/
blkid = 0;
nblks = 1;
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_dirty_l1(dn, 0, tx);
rw_exit(&dn->dn_struct_rwlock);
}
goto done;
} else if (off >= blksz) {
/* Freeing past end-of-data */
return;
} else {
/* Freeing part of the block. */
head = blksz - off;
ASSERT3U(head, >, 0);
}
blkoff = off;
}
/* zero out any partial block data at the start of the range */
if (head) {
ASSERT3U(blkoff + head, ==, blksz);
if (len < head)
head = len;
dnode_partial_zero(dn, off, blkoff, head, tx);
off += head;
len -= head;
}
/* If the range was less than one block, we're done */
if (len == 0)
return;
/* If the remaining range is past end of file, we're done */
if ((off >> blkshift) > dn->dn_maxblkid)
return;
ASSERT(ISP2(blksz));
if (trunc)
tail = 0;
else
tail = P2PHASE(len, blksz);
ASSERT0(P2PHASE(off, blksz));
/* zero out any partial block data at the end of the range */
if (tail) {
if (len < tail)
tail = len;
dnode_partial_zero(dn, off + len, 0, tail, tx);
len -= tail;
}
/* If the range did not include a full block, we are done */
if (len == 0)
return;
ASSERT(IS_P2ALIGNED(off, blksz));
ASSERT(trunc || IS_P2ALIGNED(len, blksz));
blkid = off >> blkshift;
nblks = len >> blkshift;
if (trunc)
nblks += 1;
/*
* Dirty all the indirect blocks in this range. Note that only
* the first and last indirect blocks can actually be written
* (if they were partially freed) -- they must be dirtied, even if
* they do not exist on disk yet. The interior blocks will
* be freed by free_children(), so they will not actually be written.
* Even though these interior blocks will not be written, we
* dirty them for two reasons:
*
* - It ensures that the indirect blocks remain in memory until
* syncing context. (They have already been prefetched by
* dmu_tx_hold_free(), so we don't have to worry about reading
* them serially here.)
*
* - The dirty space accounting will put pressure on the txg sync
* mechanism to begin syncing, and to delay transactions if there
* is a large amount of freeing. Even though these indirect
* blocks will not be written, we could need to write the same
* amount of space if we copy the freed BPs into deadlists.
*/
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
uint64_t first, last;
first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx);
if (trunc)
last = dn->dn_maxblkid >> epbs;
else
last = (blkid + nblks - 1) >> epbs;
if (last != first)
dnode_dirty_l1(dn, last, tx);
dnode_dirty_l1range(dn, first, last, tx);
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
for (uint64_t i = first + 1; i < last; i++) {
/*
* Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of
* level-0-equivalent bytes.
*/
uint64_t ibyte = i << shift;
int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0);
i = ibyte >> shift;
if (i >= last)
break;
/*
* Normally we should not see an error, either
* from dnode_next_offset() or dbuf_hold_level()
* (except for ESRCH from dnode_next_offset).
* If there is an i/o error, then when we read
* this block in syncing context, it will use
* ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
* to the "failmode" property. dnode_next_offset()
* doesn't have a flag to indicate MUSTSUCCEED.
*/
if (err != 0)
break;
dnode_dirty_l1(dn, i, tx);
}
rw_exit(&dn->dn_struct_rwlock);
}
done:
/*
* Add this range to the dnode range list.
* We will finish up this free operation in the syncing phase.
*/
mutex_enter(&dn->dn_mtx);
{
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] == NULL) {
dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0);
}
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
(u_longlong_t)blkid, (u_longlong_t)nblks,
(u_longlong_t)tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
dnode_setdirty(dn, tx);
}
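/*
 * A user-space sketch of the head/tail alignment arithmetic that
 * dnode_free_range() performs for power-of-two block sizes.  The P2 macros
 * are written out with their usual sysmacros.h shapes; treat the EX_*
 * definitions and the numbers as illustrative rather than authoritative.
 */
#include <assert.h>
#include <stdint.h>

#define	EX_P2PHASE(x, align)	((x) & ((align) - 1))		/* offset into a block */
#define	EX_P2NPHASE(x, align)	(-(x) & ((align) - 1))		/* bytes to next boundary */

static void
example_head_tail(void)
{
	uint64_t blksz = 4096;
	uint64_t off = 5000, len = 20000;		/* free [5000, 25000) */

	uint64_t head = EX_P2NPHASE(off, blksz);	/* 3192 bytes up to 8192 */
	uint64_t blkoff = EX_P2PHASE(off, blksz);	/* 904 bytes into block 1 */

	assert(head == 3192 && blkoff == 904);

	/* After zeroing the head, the remaining range starts block-aligned. */
	off += head;
	len -= head;
	assert(EX_P2PHASE(off, blksz) == 0);

	uint64_t tail = EX_P2PHASE(len, blksz);		/* partial block at the end */
	assert(off == 8192 && len == 16808 && tail == 424);
}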
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
int i;
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
void *dp = spa_get_dsl(dn->dn_objset->os_spa);
int i;
if (blkid == DMU_BONUS_BLKID)
return (FALSE);
/*
* If we're in the process of opening the pool, dp will not be
* set yet, but there shouldn't be anything dirty.
*/
if (dp == NULL)
return (FALSE);
if (dn->dn_free_txg)
return (TRUE);
if (blkid == DMU_SPILL_BLKID)
return (dnode_spill_freed(dn));
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_free_ranges[i] != NULL &&
range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
uint64_t space;
dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
dn, dn->dn_phys,
(u_longlong_t)dn->dn_phys->dn_used,
(longlong_t)delta);
mutex_enter(&dn->dn_mtx);
space = DN_USED_BYTES(dn->dn_phys);
if (delta > 0) {
ASSERT3U(space + delta, >=, space); /* no overflow */
} else {
ASSERT3U(space, >=, -delta); /* no underflow */
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
dn->dn_phys->dn_used = space;
dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
}
mutex_exit(&dn->dn_mtx);
}
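/*
 * A user-space sketch of the dual encoding handled in dnode_diduse_space()
 * above: old pools store dn_used in 512-byte sectors, newer pools store raw
 * bytes and set a flag, and DN_USED_BYTES() selects between the two.  The
 * EX_* constants are simplified stand-ins for DEV_BSHIFT and
 * DNODE_FLAG_USED_BYTES.
 */
#include <assert.h>
#include <stdint.h>

#define	EX_BSHIFT		9		/* 512-byte sectors */
#define	EX_FLAG_USED_BYTES	0x01

static uint64_t
example_used_bytes(uint64_t dn_used, uint8_t dn_flags)
{
	/* Same shape as DN_USED_BYTES(): the flag selects bytes vs. sectors. */
	return ((dn_flags & EX_FLAG_USED_BYTES) ?
	    dn_used : dn_used << EX_BSHIFT);
}

static void
example_encodings(void)
{
	/* Old-style: 24 sectors == 12288 bytes. */
	assert(example_used_bytes(24, 0) == 12288);
	/* New-style: the value is already in bytes. */
	assert(example_used_bytes(12288, EX_FLAG_USED_BYTES) == 12288);
}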
/*
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
* we find something (i.e., we don't return ESRCH) and then search back
* down the tree (narrow the search) until we reach our original search
* level.
*/
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
int lvl, uint64_t blkfill, uint64_t txg)
{
dmu_buf_impl_t *db = NULL;
void *data = NULL;
uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
uint64_t epb = 1ULL << epbs;
uint64_t minfill, maxfill;
boolean_t hole;
int i, inc, error, span;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
hole = ((flags & DNODE_FIND_HOLE) != 0);
inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
ASSERT(txg == 0 || !hole);
if (lvl == dn->dn_phys->dn_nlevels) {
error = 0;
epb = dn->dn_phys->dn_nblkptr;
data = dn->dn_phys->dn_blkptr;
} else {
uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
if (error) {
if (error != ENOENT)
return (error);
if (hole)
return (0);
/*
* This can only happen when we are searching up
* the block tree for data. We don't really need to
* adjust the offset, as we will just end up looking
* at the pointer to this block in its parent, and its
* going to be unallocated, so we will skip over it.
*/
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL,
DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (error) {
dbuf_rele(db, FTAG);
return (error);
}
data = db->db.db_data;
rw_enter(&db->db_rwlock, RW_READER);
}
if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
db->db_blkptr->blk_birth <= txg ||
BP_IS_HOLE(db->db_blkptr))) {
/*
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
ASSERT(dn->dn_type == DMU_OT_DNODE);
ASSERT(!(flags & DNODE_FIND_BACKWARDS));
for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
i < blkfill; i += dnp[i].dn_extra_slots + 1) {
if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
break;
}
if (i == blkfill)
error = SET_ERROR(ESRCH);
*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
(i << DNODE_SHIFT);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
span = (lvl - 1) * epbs + dn->dn_datablkshift;
minfill = 0;
maxfill = blkfill << ((lvl - 1) * epbs);
if (hole)
maxfill--;
else
minfill++;
if (span >= 8 * sizeof (*offset)) {
/* This only happens on the highest indirection level */
ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
*offset = 0;
} else {
*offset = *offset >> span;
}
for (i = BF64_GET(*offset, 0, epbs);
i >= 0 && i < epb; i += inc) {
if (BP_GET_FILL(&bp[i]) >= minfill &&
BP_GET_FILL(&bp[i]) <= maxfill &&
(hole || bp[i].blk_birth > txg))
break;
if (inc > 0 || *offset > 0)
*offset += inc;
}
if (span >= 8 * sizeof (*offset)) {
*offset = start;
} else {
*offset = *offset << span;
}
if (inc < 0) {
/* traversing backwards; position offset at the end */
ASSERT3U(*offset, <=, start);
*offset = MIN(*offset + (1ULL << span) - 1, start);
} else if (*offset < start) {
*offset = start;
}
if (i < 0 || i >= epb)
error = SET_ERROR(ESRCH);
}
if (db != NULL) {
rw_exit(&db->db_rwlock);
dbuf_rele(db, FTAG);
}
return (error);
}
/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
* DNODES_PER_BLOCK for the meta dnode, and some fraction of
* DNODES_PER_BLOCK when searching for sparse regions thereof.
*
* Examples:
*
* dnode_next_offset(dn, flags, offset, 1, 1, 0);
* Finds the next/previous hole/data in a file.
* Used in dmu_offset_next().
*
* dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
 * Finds the next free/allocated dnode in an objset's meta-dnode.
 * Only finds objects that have new contents since txg (i.e.,
* bonus buffer changes and content removal are ignored).
* Used in dmu_object_next().
*
* dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
* Finds the next L2 meta-dnode bp that's at most 1/4 full.
* Used in dmu_object_alloc().
*/
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
uint64_t initial_offset = *offset;
int lvl, maxlvl;
int error = 0;
if (!(flags & DNODE_FIND_HAVELOCK))
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = SET_ERROR(ESRCH);
goto out;
}
if (dn->dn_datablkshift == 0) {
if (*offset < dn->dn_datablksz) {
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = SET_ERROR(ESRCH);
}
goto out;
}
maxlvl = dn->dn_phys->dn_nlevels;
for (lvl = minlvl; lvl <= maxlvl; lvl++) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
if (error != ESRCH)
break;
}
while (error == 0 && --lvl >= minlvl) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
}
/*
* There's always a "virtual hole" at the end of the object, even
* if all BP's which physically exist are non-holes.
*/
if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
error = 0;
}
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
return (error);
}
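/*
 * A minimal sketch of the dmu_offset_next()-style use of dnode_next_offset()
 * described in the comment above: find the next data (or hole) at or after a
 * byte offset in an ordinary object.  The wrapper itself is hypothetical;
 * only the flag/minlvl/blkfill combination follows the examples above.
 */
static int
example_next_data(dnode_t *dn, uint64_t *offp, boolean_t hole)
{
	int flags = hole ? DNODE_FIND_HOLE : 0;

	/* minlvl = 1 and blkfill = 1 for a normal (non-meta) object. */
	return (dnode_next_offset(dn, flags, offp, 1, 1, 0));
}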
#if defined(_KERNEL)
EXPORT_SYMBOL(dnode_hold);
EXPORT_SYMBOL(dnode_rele);
EXPORT_SYMBOL(dnode_set_nlevels);
EXPORT_SYMBOL(dnode_set_blksz);
EXPORT_SYMBOL(dnode_free_range);
EXPORT_SYMBOL(dnode_evict_dbufs);
EXPORT_SYMBOL(dnode_evict_bonus);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dataset.c b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
index 21fef4c621b5..94b77aa1b743 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dataset.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
@@ -1,5030 +1,5017 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 RackTop Systems.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020 The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/policy.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>
/*
* The SPA supports block sizes up to 16MB. However, very large blocks
* can have an impact on i/o latency (e.g. tying up a spinning disk for
* ~300ms), and also potentially on the memory allocator. Therefore,
* we did not allow the recordsize to be set larger than zfs_max_recordsize
* (former default: 1MB). Larger blocks could be created by changing this
* tunable, and pools with larger blocks could always be imported and used,
* regardless of this setting.
*
* We do, however, still limit it by default to 1M on x86_32, because Linux's
* 3/1 memory split doesn't leave much room for 16M chunks.
*/
#ifdef _ILP32
int zfs_max_recordsize = 1 * 1024 * 1024;
#else
int zfs_max_recordsize = 16 * 1024 * 1024;
#endif
static int zfs_allow_redacted_dataset_mount = 0;
int zfs_snapshot_history_enabled = 1;
#define SWITCH64(x, y) \
{ \
uint64_t __tmp = (x); \
(x) = (y); \
(y) = __tmp; \
}
#define DS_REF_MAX (1ULL << 62)
static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
uint64_t obj, dmu_tx_t *tx);
static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
dmu_tx_t *tx);
static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);
extern int spa_asize_inflation;
static zil_header_t zero_zil;
/*
* Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
*/
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
dsl_dataset_phys_t *ds_phys;
uint64_t old_bytes, new_bytes;
if (ds->ds_reserved == 0)
return (delta);
ds_phys = dsl_dataset_phys(ds);
old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
return (new_bytes - old_bytes);
}
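/*
 * A user-space sketch of the refreservation clamping in parent_delta()
 * above: only the portion of the delta that moves unique_bytes above the
 * reserved amount is passed on to the dsl_dir accounting.  The helper and
 * the sample numbers are illustrative only.
 */
#include <assert.h>
#include <stdint.h>

static int64_t
example_parent_delta(uint64_t unique_bytes, uint64_t reserved, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (reserved == 0)
		return (delta);
	/* Mirrors the MAX() clamping above for a positive delta. */
	old_bytes = unique_bytes > reserved ? unique_bytes : reserved;
	new_bytes = unique_bytes + delta > reserved ?
	    unique_bytes + delta : reserved;
	return ((int64_t)(new_bytes - old_bytes));
}

static void
example_refreservation(void)
{
	/* 1 MB referenced, 4 MB refreservation: growth stays inside it. */
	assert(example_parent_delta(1 << 20, 4 << 20, 1 << 20) == 0);
	/* Growth that crosses the reservation only charges the excess. */
	assert(example_parent_delta(3 << 20, 4 << 20, 2 << 20) == (1 << 20));
	/* No reservation: the full delta propagates. */
	assert(example_parent_delta(1 << 20, 0, 1 << 20) == (1 << 20));
}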
-void
-dsl_dataset_feature_set_activation(const blkptr_t *bp, dsl_dataset_t *ds)
-{
- spa_feature_t f;
- if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
- ds->ds_feature_activation[SPA_FEATURE_LARGE_BLOCKS] =
- (void *)B_TRUE;
- }
-
- f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
- if (f != SPA_FEATURE_NONE) {
- ASSERT3S(spa_feature_table[f].fi_type, ==,
- ZFEATURE_TYPE_BOOLEAN);
- ds->ds_feature_activation[f] = (void *)B_TRUE;
- }
-
- f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
- if (f != SPA_FEATURE_NONE) {
- ASSERT3S(spa_feature_table[f].fi_type, ==,
- ZFEATURE_TYPE_BOOLEAN);
- ds->ds_feature_activation[f] = (void *)B_TRUE;
- }
-}
-
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int used = bp_get_dsize_sync(spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
int64_t delta;
+ spa_feature_t f;
dprintf_bp(bp, "ds=%p", ds);
ASSERT(dmu_tx_is_syncing(tx));
/* It could have been compressed away to nothing */
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return;
ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
if (ds == NULL) {
dsl_pool_mos_diduse_space(tx->tx_pool,
used, compressed, uncompressed);
return;
}
ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
dsl_dataset_phys(ds)->ds_referenced_bytes += used;
dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
dsl_dataset_phys(ds)->ds_unique_bytes += used;
- dsl_dataset_feature_set_activation(bp, ds);
+ if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
+ ds->ds_feature_activation[SPA_FEATURE_LARGE_BLOCKS] =
+ (void *)B_TRUE;
+ }
+
+ f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
+ if (f != SPA_FEATURE_NONE) {
+ ASSERT3S(spa_feature_table[f].fi_type, ==,
+ ZFEATURE_TYPE_BOOLEAN);
+ ds->ds_feature_activation[f] = (void *)B_TRUE;
+ }
+
+ f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
+ if (f != SPA_FEATURE_NONE) {
+ ASSERT3S(spa_feature_table[f].fi_type, ==,
+ ZFEATURE_TYPE_BOOLEAN);
+ ds->ds_feature_activation[f] = (void *)B_TRUE;
+ }
/*
* Track block for livelist, but ignore embedded blocks because
* they do not need to be freed.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg &&
!(BP_IS_EMBEDDED(bp))) {
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_allocs, bp);
}
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_transfer_space(ds->ds_dir, delta,
compressed, uncompressed, used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
/*
* Called when the specified segment has been remapped, and is thus no
* longer referenced in the head dataset. The vdev must be indirect.
*
* If the segment is referenced by a snapshot, put it on the remap deadlist.
* Otherwise, add this segment to the obsolete spacemap.
*/
void
dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
uint64_t size, uint64_t birth, dmu_tx_t *tx)
{
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(birth <= tx->tx_txg);
ASSERT(!ds->ds_is_snapshot);
if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
blkptr_t fakebp;
dva_t *dva = &fakebp.blk_dva[0];
ASSERT(ds != NULL);
mutex_enter(&ds->ds_remap_deadlist_lock);
if (!dsl_dataset_remap_deadlist_exists(ds)) {
dsl_dataset_create_remap_deadlist(ds, tx);
}
mutex_exit(&ds->ds_remap_deadlist_lock);
BP_ZERO(&fakebp);
fakebp.blk_birth = birth;
DVA_SET_VDEV(dva, vdev);
DVA_SET_OFFSET(dva, offset);
DVA_SET_ASIZE(dva, size);
dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, B_FALSE,
tx);
}
}
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
boolean_t async)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int used = bp_get_dsize_sync(spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(bp->blk_birth <= tx->tx_txg);
if (ds == NULL) {
dsl_free(tx->tx_pool, tx->tx_txg, bp);
dsl_pool_mos_diduse_space(tx->tx_pool,
-used, -compressed, -uncompressed);
return (used);
}
ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
ASSERT(!ds->ds_is_snapshot);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
/*
* Track block for livelist, but ignore embedded blocks because
* they do not need to be freed.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg &&
!(BP_IS_EMBEDDED(bp))) {
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees, bp);
}
if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
int64_t delta;
dprintf_bp(bp, "freeing ds=%llu", (u_longlong_t)ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
mutex_enter(&ds->ds_lock);
ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
delta = parent_delta(ds, -used);
dsl_dataset_phys(ds)->ds_unique_bytes -= used;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_transfer_space(ds->ds_dir,
delta, -compressed, -uncompressed, -used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
/*
* We are here as part of zio's write done callback,
* which means we're a zio interrupt thread. We can't
* call dsl_deadlist_insert() now because it may block
* waiting for I/O. Instead, put bp on the deferred
* queue and let dsl_pool_sync() finish the job.
*/
bplist_append(&ds->ds_pending_deadlist, bp);
} else {
dsl_deadlist_insert(&ds->ds_deadlist, bp, B_FALSE, tx);
}
ASSERT3U(ds->ds_prev->ds_object, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj);
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object && bp->blk_birth >
dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
mutex_enter(&ds->ds_prev->ds_lock);
dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
mutex_exit(&ds->ds_prev->ds_lock);
}
if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
dsl_dir_transfer_space(ds->ds_dir, used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
}
dsl_bookmark_block_killed(ds, bp, tx);
mutex_enter(&ds->ds_lock);
ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
mutex_exit(&ds->ds_lock);
return (used);
}
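/*
 * In-core state for a per-dataset uint64-array feature, stored in
 * ds_feature[f]; boolean features are tracked as (void *)B_TRUE instead.
 * load_zfeature() and unload_zfeature() below move this state between
 * the dataset's MOS ZAP entry and memory.
 */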
struct feature_type_uint64_array_arg {
uint64_t length;
uint64_t *array;
};
static void
unload_zfeature(dsl_dataset_t *ds, spa_feature_t f)
{
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
kmem_free(ftuaa->array, ftuaa->length * sizeof (uint64_t));
kmem_free(ftuaa, sizeof (*ftuaa));
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
}
static int
load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
{
int err = 0;
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
err = zap_contains(mos, ds->ds_object,
spa_feature_table[f].fi_guid);
if (err == 0) {
ds->ds_feature[f] = (void *)B_TRUE;
} else {
ASSERT3U(err, ==, ENOENT);
err = 0;
}
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
uint64_t int_size, num_int;
uint64_t *data;
err = zap_length(mos, ds->ds_object,
spa_feature_table[f].fi_guid, &int_size, &num_int);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
err = 0;
break;
}
ASSERT3U(int_size, ==, sizeof (uint64_t));
data = kmem_alloc(int_size * num_int, KM_SLEEP);
VERIFY0(zap_lookup(mos, ds->ds_object,
spa_feature_table[f].fi_guid, int_size, num_int, data));
struct feature_type_uint64_array_arg *ftuaa =
kmem_alloc(sizeof (*ftuaa), KM_SLEEP);
ftuaa->length = num_int;
ftuaa->array = data;
ds->ds_feature[f] = ftuaa;
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
return (err);
}
/*
* We have to release the fsid synchronously or we risk that a subsequent
* mount of the same dataset will fail to unique_insert the fsid. This
* failure would manifest itself as the fsid of this dataset changing
* between mounts which makes NFS clients quite unhappy.
*/
static void
dsl_dataset_evict_sync(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
unique_remove(ds->ds_fsid_guid);
}
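/*
 * Asynchronous part of dbuf user eviction: tear down the in-core
 * dsl_dataset_t (objset, previous-snapshot hold, deadlists, bookmarks,
 * per-dataset feature state, locks) once the bonus dbuf is evicted.
 */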
static void
dsl_dataset_evict_async(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
ds->ds_dbuf = NULL;
if (ds->ds_objset != NULL)
dmu_objset_evict(ds->ds_objset);
if (ds->ds_prev) {
dsl_dataset_rele(ds->ds_prev, ds);
ds->ds_prev = NULL;
}
dsl_bookmark_fini_ds(ds);
bplist_destroy(&ds->ds_pending_deadlist);
if (dsl_deadlist_is_open(&ds->ds_deadlist))
dsl_deadlist_close(&ds->ds_deadlist);
if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
dsl_deadlist_close(&ds->ds_remap_deadlist);
if (ds->ds_dir)
dsl_dir_async_rele(ds->ds_dir, ds);
ASSERT(!list_link_active(&ds->ds_synced_link));
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
unload_zfeature(ds, f);
}
list_destroy(&ds->ds_prop_cbs);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
}
int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
dsl_dataset_phys_t *headphys;
int err;
dmu_buf_t *headdbuf;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
if (ds->ds_snapname[0])
return (0);
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
return (0);
err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
FTAG, &headdbuf);
if (err != 0)
return (err);
headphys = headdbuf->db_data;
err = zap_value_search(dp->dp_meta_objset,
headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
if (err != 0 && zfs_recover == B_TRUE) {
/* Report the original lookup error in the fallback name. */
(void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
"SNAPOBJ=%llu-ERR=%d",
(unsigned long long)ds->ds_object, err);
err = 0;
}
dmu_buf_rele(headdbuf, FTAG);
return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_lookup_norm(mos, snapobj, name, 8, 1,
value, mt, NULL, 0, NULL);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_lookup(mos, snapobj, name, 8, 1, value);
return (err);
}
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
dsl_dir_snap_cmtime_update(ds->ds_dir, tx);
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_remove_norm(mos, snapobj, name, mt, tx);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_remove(mos, snapobj, name, tx);
if (err == 0 && adj_cnt)
dsl_fs_ss_count_adjust(ds->ds_dir, -1,
DD_FIELD_SNAPSHOT_COUNT, tx);
return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, const void *tag)
{
dmu_buf_t *dbuf = ds->ds_dbuf;
boolean_t result = B_FALSE;
if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
ds->ds_object, DMU_BONUS_BLKID, tag)) {
if (ds == dmu_buf_get_user(dbuf))
result = B_TRUE;
else
dmu_buf_rele(dbuf, tag);
}
return (result);
}
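/*
 * Hold a dataset by MOS object number. On the first hold the in-core
 * dsl_dataset_t is built here and attached to the bonus dbuf via
 * dmu_buf_set_user_ie(); if another thread wins that race, the local
 * copy is torn down again and the winner is used instead.
 */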
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, const void *tag,
dsl_dataset_t **dsp)
{
objset_t *mos = dp->dp_meta_objset;
dmu_buf_t *dbuf;
dsl_dataset_t *ds;
int err;
dmu_object_info_t doi;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
if (err != 0)
return (err);
/* Make sure dsobj has the correct object type. */
dmu_object_info_from_db(dbuf, &doi);
if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
dmu_buf_rele(dbuf, tag);
return (SET_ERROR(EINVAL));
}
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
dsl_dataset_t *winner = NULL;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_dbuf = dbuf;
ds->ds_object = dsobj;
ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
list_link_init(&ds->ds_synced_link);
err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
NULL, ds, &ds->ds_dir);
if (err != 0) {
kmem_free(ds, sizeof (dsl_dataset_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_remap_deadlist_lock,
NULL, MUTEX_DEFAULT, NULL);
rrw_init(&ds->ds_bp_rwlock, B_FALSE);
zfs_refcount_create(&ds->ds_longholds);
bplist_create(&ds->ds_pending_deadlist);
list_create(&ds->ds_sendstreams, sizeof (dmu_sendstatus_t),
offsetof(dmu_sendstatus_t, dss_link));
list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_ds_node));
if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
spa_feature_t f;
for (f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET))
continue;
err = load_zfeature(mos, ds, f);
}
}
if (!ds->ds_is_snapshot) {
ds->ds_snapname[0] = '\0';
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev);
}
err = dsl_bookmark_init_ds(ds);
} else {
if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
err = dsl_dataset_get_snapname(ds);
if (err == 0 &&
dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
err = zap_count(
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_userrefs_obj,
&ds->ds_userrefs);
}
}
if (err == 0 && !ds->ds_is_snapshot) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
&ds->ds_reserved);
if (err == 0) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
&ds->ds_quota);
}
} else {
ds->ds_reserved = ds->ds_quota = 0;
}
if (err == 0 && ds->ds_dir->dd_crypto_obj != 0 &&
ds->ds_is_snapshot &&
zap_contains(mos, dsobj, DS_FIELD_IVSET_GUID) != 0) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
dsl_deadlist_open(&ds->ds_deadlist,
mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
uint64_t remap_deadlist_obj =
dsl_dataset_get_remap_deadlist_object(ds);
if (remap_deadlist_obj != 0) {
dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
remap_deadlist_obj);
}
dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
dsl_dataset_evict_async, &ds->ds_dbuf);
if (err == 0)
winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);
if (err != 0 || winner != NULL) {
bplist_destroy(&ds->ds_pending_deadlist);
dsl_deadlist_close(&ds->ds_deadlist);
if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
dsl_deadlist_close(&ds->ds_remap_deadlist);
dsl_bookmark_fini_ds(ds);
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
dsl_dir_rele(ds->ds_dir, ds);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
unload_zfeature(ds, f);
}
list_destroy(&ds->ds_prop_cbs);
list_destroy(&ds->ds_sendstreams);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
if (err != 0) {
dmu_buf_rele(dbuf, tag);
return (err);
}
ds = winner;
} else {
ds->ds_fsid_guid =
unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
if (ds->ds_fsid_guid !=
dsl_dataset_phys(ds)->ds_fsid_guid) {
zfs_dbgmsg("ds_fsid_guid changed from "
"%llx to %llx for pool %s dataset id %llu",
(long long)
dsl_dataset_phys(ds)->ds_fsid_guid,
(long long)ds->ds_fsid_guid,
spa_name(dp->dp_spa),
(u_longlong_t)dsobj);
}
}
}
ASSERT3P(ds->ds_dbuf, ==, dbuf);
ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
*dsp = ds;
return (0);
}
int
dsl_dataset_create_key_mapping(dsl_dataset_t *ds)
{
dsl_dir_t *dd = ds->ds_dir;
if (dd->dd_crypto_obj == 0)
return (0);
return (spa_keystore_create_mapping(dd->dd_pool->dp_spa,
ds, ds, &ds->ds_key_mapping));
}
int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
int err;
err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
if (err != 0)
return (err);
ASSERT3P(*dsp, !=, NULL);
if (flags & DS_HOLD_FLAG_DECRYPT) {
err = dsl_dataset_create_key_mapping(*dsp);
if (err != 0)
dsl_dataset_rele(*dsp, tag);
}
return (err);
}
int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
dsl_dir_t *dd;
const char *snapname;
uint64_t obj;
int err = 0;
dsl_dataset_t *ds;
err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
if (err != 0)
return (err);
ASSERT(dsl_pool_config_held(dp));
obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
if (obj != 0)
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
else
err = SET_ERROR(ENOENT);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
dsl_dataset_t *snap_ds;
if (*snapname++ != '@') {
dsl_dataset_rele_flags(ds, flags, tag);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ENOENT));
}
dprintf("looking for snapshot '%s'\n", snapname);
err = dsl_dataset_snap_lookup(ds, snapname, &obj);
if (err == 0) {
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
&snap_ds);
}
dsl_dataset_rele_flags(ds, flags, tag);
if (err == 0) {
mutex_enter(&snap_ds->ds_lock);
if (snap_ds->ds_snapname[0] == 0)
(void) strlcpy(snap_ds->ds_snapname, snapname,
sizeof (snap_ds->ds_snapname));
mutex_exit(&snap_ds->ds_lock);
ds = snap_ds;
}
}
if (err == 0)
*dsp = ds;
dsl_dir_rele(dd, FTAG);
return (err);
}
int
dsl_dataset_hold(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
}
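/*
 * Typical usage (sketch; "tank/fs" is an example name only): the pool
 * configuration is held around the dataset hold, e.g.
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *	if (dsl_pool_hold("tank/fs", FTAG, &dp) == 0) {
 *		if (dsl_dataset_hold(dp, "tank/fs", FTAG, &ds) == 0) {
 *			... inspect ds ...
 *			dsl_dataset_rele(ds, FTAG);
 *		}
 *		dsl_pool_rele(dp, FTAG);
 *	}
 */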
static int
dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag, override)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
*dsp = NULL;
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_FALSE, dsp));
}
int
dsl_dataset_own_obj_force(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_TRUE, dsp));
}
static int
dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag, override)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own_force(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_TRUE, dsp));
}
int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_FALSE, dsp));
}
/*
* See the comment above dsl_pool_hold() for details. In summary, a long
* hold is used to prevent destruction of a dataset while the pool hold
* is dropped, allowing other concurrent operations (e.g. spa_sync()).
*
* The dataset and pool must be held when this function is called. After it
* is called, the pool hold may be released while the dataset is still held
* and accessed.
*/
void
dsl_dataset_long_hold(dsl_dataset_t *ds, const void *tag)
{
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
(void) zfs_refcount_add(&ds->ds_longholds, tag);
}
void
dsl_dataset_long_rele(dsl_dataset_t *ds, const void *tag)
{
(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}
/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
return (!zfs_refcount_is_zero(&ds->ds_longholds));
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
if (ds == NULL) {
(void) strlcpy(name, "mos", ZFS_MAX_DATASET_NAME_LEN);
} else {
dsl_dir_name(ds->ds_dir, name);
VERIFY0(dsl_dataset_get_snapname(ds));
if (ds->ds_snapname[0]) {
VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
/*
* We use a "recursive" mutex so that we
* can call dprintf_ds() with ds_lock held.
*/
if (!MUTEX_HELD(&ds->ds_lock)) {
mutex_enter(&ds->ds_lock);
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&ds->ds_lock);
} else {
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
}
}
}
}
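/*
 * Return the length of this dataset's full name, including the "@" and
 * snapshot name for snapshots, computed from the dsl_dir name length
 * plus the cached snapname.
 */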
int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
VERIFY0(dsl_dataset_get_snapname(ds));
mutex_enter(&ds->ds_lock);
int len = strlen(ds->ds_snapname);
mutex_exit(&ds->ds_lock);
/* add '@' if ds is a snap */
if (len > 0)
len++;
len += dsl_dir_namelen(ds->ds_dir);
return (len);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, const void *tag)
{
dmu_buf_rele(ds->ds_dbuf, tag);
}
void
dsl_dataset_remove_key_mapping(dsl_dataset_t *ds)
{
dsl_dir_t *dd = ds->ds_dir;
if (dd == NULL || dd->dd_crypto_obj == 0)
return;
(void) spa_keystore_remove_mapping(dd->dd_pool->dp_spa,
ds->ds_object, ds);
}
void
dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
const void *tag)
{
if (flags & DS_HOLD_FLAG_DECRYPT)
dsl_dataset_remove_key_mapping(ds);
dsl_dataset_rele(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, const void *tag)
{
ASSERT3P(ds->ds_owner, ==, tag);
ASSERT(ds->ds_dbuf != NULL);
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
dsl_dataset_long_rele(ds, tag);
dsl_dataset_rele_flags(ds, flags, tag);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, const void *tag, boolean_t override)
{
boolean_t gotit = FALSE;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_lock);
if (ds->ds_owner == NULL && (override || !(DS_IS_INCONSISTENT(ds) ||
(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS) &&
!zfs_allow_redacted_dataset_mount)))) {
ds->ds_owner = tag;
dsl_dataset_long_hold(ds, tag);
gotit = TRUE;
}
mutex_exit(&ds->ds_lock);
return (gotit);
}
boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
boolean_t rv;
mutex_enter(&ds->ds_lock);
rv = (ds->ds_owner != NULL);
mutex_exit(&ds->ds_lock);
return (rv);
}
static boolean_t
zfeature_active(spa_feature_t f, void *arg)
{
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN: {
boolean_t val = (boolean_t)(uintptr_t)arg;
ASSERT(val == B_FALSE || val == B_TRUE);
return (val);
}
case ZFEATURE_TYPE_UINT64_ARRAY:
/*
* In this case, arg is a uint64_t array. The feature is active
* if the array is non-null.
*/
return (arg != NULL);
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
return (B_FALSE);
}
}
boolean_t
dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f)
{
return (zfeature_active(f, ds->ds_feature[f]));
}
/*
* The buffers passed out by this function are references to internal buffers;
* they should not be freed by callers of this function, and they should not be
* used after the dataset has been released.
*/
boolean_t
dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds, spa_feature_t f,
uint64_t *outlength, uint64_t **outp)
{
VERIFY(spa_feature_table[f].fi_type & ZFEATURE_TYPE_UINT64_ARRAY);
if (!dsl_dataset_feature_is_active(ds, f)) {
return (B_FALSE);
}
struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
*outp = ftuaa->array;
*outlength = ftuaa->length;
return (B_TRUE);
}
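/*
 * Record a per-dataset feature as active on the given dataset object:
 * bump the pool-wide feature refcount, zapify the dataset object, and
 * add a ZAP entry under the feature guid (a placeholder zero for
 * boolean features, the uint64 array payload for array features).
 */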
void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t zero = 0;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
spa_feature_incr(spa, f, tx);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
ASSERT3S((boolean_t)(uintptr_t)arg, ==, B_TRUE);
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (zero), 1, &zero, tx));
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
struct feature_type_uint64_array_arg *ftuaa = arg;
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (uint64_t), ftuaa->length, ftuaa->array, tx));
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
}
static void
dsl_dataset_deactivate_feature_impl(dsl_dataset_t *ds, spa_feature_t f,
dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t dsobj = ds->ds_object;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
spa_feature_decr(spa, f, tx);
ds->ds_feature[f] = NULL;
}
void
dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f, dmu_tx_t *tx)
{
unload_zfeature(ds, f);
dsl_dataset_deactivate_feature_impl(ds, f, tx);
}
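/*
 * Allocate and initialize the dsl_dataset_phys_t for a new head dataset
 * in the given dsl_dir. If an origin snapshot is supplied (i.e. this is
 * a clone), space accounting and the block pointer are inherited from
 * the origin, the origin's ds_num_children and the next-clones/dd_clones
 * ZAPs are updated, and the deadlist is cloned from the origin's head.
 */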
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj;
objset_t *mos = dp->dp_meta_objset;
if (origin == NULL)
origin = dp->dp_origin_snap;
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_snapnames_zapobj =
zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
DMU_OT_NONE, 0, tx);
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
if (origin == NULL) {
dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
} else {
dsl_dataset_t *ohds; /* head of the origin snapshot */
dsphys->ds_prev_snap_obj = origin->ds_object;
dsphys->ds_prev_snap_txg =
dsl_dataset_phys(origin)->ds_creation_txg;
dsphys->ds_referenced_bytes =
dsl_dataset_phys(origin)->ds_referenced_bytes;
dsphys->ds_compressed_bytes =
dsl_dataset_phys(origin)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(origin)->ds_uncompressed_bytes;
rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
rrw_exit(&origin->ds_bp_rwlock, FTAG);
/*
* Inherit flags that describe the dataset's contents
* (INCONSISTENT) or properties (Case Insensitive).
*/
dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
(DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, origin->ds_feature[f])) {
dsl_dataset_activate_feature(dsobj, f,
origin->ds_feature[f], tx);
}
}
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_dataset_phys(origin)->ds_num_children++;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
FTAG, &ohds));
dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
dsl_dataset_rele(ohds, FTAG);
if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
dsl_dataset_phys(origin)->ds_next_clones_obj =
zap_create(mos,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dataset_phys(origin)->ds_next_clones_obj,
dsobj, tx));
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos,
DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dir_phys(origin->ds_dir)->dd_clones,
dsobj, tx));
}
}
/* handle encryption */
dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
dmu_buf_rele(dbuf, FTAG);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;
return (dsobj);
}
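/*
 * Zero out the ZIL header of a freshly created clone so it does not
 * carry stale log records from the origin snapshot; the dataset is
 * synced immediately so the zeroed header reaches disk in this txg.
 */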
static void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
if (memcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
dsl_pool_t *dp = ds->ds_dir->dd_pool;
zio_t *zio;
memset(&os->os_zil_header, 0, sizeof (os->os_zil_header));
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
/* dsl_dataset_sync_done will drop this reference. */
dmu_buf_add_ref(ds->ds_dbuf, ds);
dsl_dataset_sync_done(ds, tx);
}
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
dsl_pool_t *dp = pdd->dd_pool;
uint64_t dsobj, ddobj;
dsl_dir_t *dd;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(lastname[0] != '@');
/*
* Filesystems will eventually have their origin set to dp_origin_snap,
* but that's taken care of in dsl_dataset_create_sync_dd. When
* creating a filesystem, this function is called with origin equal to
* NULL.
*/
if (origin != NULL)
ASSERT3P(origin, !=, dp->dp_origin_snap);
ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));
dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
flags & ~DS_CREATE_FLAG_NODIRTY, tx);
dsl_deleg_set_create_perms(dd, tx, cr);
/*
* If we are creating a clone and the livelist feature is enabled,
* add the entry DD_FIELD_LIVELIST to ZAP.
*/
if (origin != NULL &&
spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LIVELIST)) {
objset_t *mos = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
uint64_t obj = dsl_deadlist_alloc(mos, tx);
VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_LIVELIST,
sizeof (uint64_t), 1, &obj, tx));
spa_feature_incr(dp->dp_spa, SPA_FEATURE_LIVELIST, tx);
}
/*
* Since we're creating a new node we know it's a leaf, so we can
* initialize the counts if the limit feature is active.
*/
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
uint64_t cnt = 0;
objset_t *os = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (cnt), 1, &cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (cnt), 1, &cnt, tx));
}
dsl_dir_rele(dd, FTAG);
/*
* If we are creating a clone, make sure we zero out any stale
* data from the origin snapshot's zil header.
*/
if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_zero_zil(ds, tx);
dsl_dataset_rele(ds, FTAG);
}
return (dsobj);
}
/*
* The unique space in the head dataset can be calculated by subtracting
* the space used in the most recent snapshot, that is still being used
* in this file system, from the space currently in use. To figure out
* the space in the most recent snapshot still in use, we need to take
* the total space used in the snapshot and subtract out the space that
* has been freed up since the snapshot was taken.
*/
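/*
 * In other words: unique = referenced - (prev_snap_referenced - dlused),
 * where dlused is the space on the head's deadlist, i.e. the space freed
 * from the head since the most recent snapshot was taken.
 */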
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
uint64_t mrs_used;
uint64_t dlused, dlcomp, dluncomp;
ASSERT(!ds->ds_is_snapshot);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
else
mrs_used = 0;
dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
ASSERT3U(dlused, <=, mrs_used);
dsl_dataset_phys(ds)->ds_unique_bytes =
dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);
if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t count __maybe_unused;
int err;
ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
obj, tx);
/*
* The err should not be ENOENT, but a bug in a previous version
* of the code could cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a missing entry.
* If we knew that the pool was created after
* SPA_VERSION_NEXT_CLONES, we could assert that it isn't
* ENOENT. However, at least we can check that we don't have
* too many entries in the next_clones_obj even after failing to
* remove this one.
*/
if (err != ENOENT)
VERIFY0(err);
ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
return (&dsl_dataset_phys(ds)->ds_bp);
}
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp;
if (ds == NULL) /* this is the meta-objset */
return;
ASSERT(ds->ds_objset != NULL);
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
panic("dirtying snapshot!");
/* Must not dirty a dataset in the same txg where it got snapshotted. */
ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dp = ds->ds_dir->dd_pool;
if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
objset_t *os = ds->ds_objset;
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, ds);
/* if this dataset is encrypted, grab a reference to the DCK */
if (ds->ds_dir->dd_crypto_obj != 0 &&
!os->os_raw_receive &&
!os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_add_ref(ds->ds_key_mapping, ds);
}
}
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t asize;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* If there's an fs-only reservation, any blocks that might become
* owned by the snapshot dataset must be accommodated by space
* outside of the reservation.
*/
ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* Propagate any reserved space for this snapshot to other
* snapshot checks in this sync group.
*/
if (asize > 0)
dsl_dir_willuse_space(ds->ds_dir, asize, tx);
return (0);
}
int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc)
{
int error;
uint64_t value;
ds->ds_trysnap_txg = tx->tx_txg;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* We don't allow multiple snapshots of the same txg. If there
* is already one, try again.
*/
if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
return (SET_ERROR(EAGAIN));
/*
* Check for conflicting snapshot name.
*/
error = dsl_dataset_snap_lookup(ds, snapname, &value);
if (error == 0)
return (SET_ERROR(EEXIST));
if (error != ENOENT)
return (error);
/*
* We don't allow taking snapshots of inconsistent datasets, such as
* those into which we are currently receiving. However, if we are
* creating this snapshot as part of a receive, this check will be
* executed atomically with respect to the completion of the receive
* itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
* case we ignore this, knowing it will be fixed up for us shortly in
* dmu_recv_end_sync().
*/
if (!recv && DS_IS_INCONSISTENT(ds))
return (SET_ERROR(EBUSY));
/*
* Skip the check for temporary snapshots or if we have already checked
* the counts in dsl_dataset_snapshot_check. This means we really only
* check the count here when we're receiving a stream.
*/
if (cnt != 0 && cr != NULL) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr, proc);
if (error != 0)
return (error);
}
error = dsl_dataset_snapshot_reserve_space(ds, tx);
if (error != 0)
return (error);
return (0);
}
int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
int rv = 0;
/*
* Pre-compute how many total new snapshots will be created for each
* level in the tree and below. This is needed for validating the
* snapshot limit when either taking a recursive snapshot or when
* taking multiple snapshots.
*
* The problem is that the counts are not actually adjusted when
* we are checking, only when we finally sync. For a single snapshot,
* this is easy, the count will increase by 1 at each node up the tree,
* but it's more complicated for the recursive/multiple snapshot case.
*
* The dsl_fs_ss_limit_check function does recursively check the count
* at each level up the tree but since it is validating each snapshot
* independently we need to be sure that we are validating the complete
* count for the entire set of snapshots. We do this by rolling up the
* counts for each component of the name into an nvlist and then
* checking each of those cases with the aggregated count.
*
* This approach properly handles not only the recursive snapshot
* case (where we get all of those on the ddsa_snaps list) but also
* the sibling case (e.g. snapshot a/b and a/c so that we will also
* validate the limit on 'a' using a count of 2).
*
* We validate the snapshot names in the third loop and only report
* name errors once.
*/
if (dmu_tx_is_syncing(tx)) {
char *nm;
nvlist_t *cnt_track = NULL;
cnt_track = fnvlist_alloc();
nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/* Rollup aggregated counts into the cnt_track list */
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL;
pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
char *pdelim;
uint64_t val;
(void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
pdelim = strchr(nm, '@');
if (pdelim == NULL)
continue;
*pdelim = '\0';
do {
if (nvlist_lookup_uint64(cnt_track, nm,
&val) == 0) {
/* update existing entry */
fnvlist_add_uint64(cnt_track, nm,
val + 1);
} else {
/* add to list */
fnvlist_add_uint64(cnt_track, nm, 1);
}
pdelim = strrchr(nm, '/');
if (pdelim != NULL)
*pdelim = '\0';
} while (pdelim != NULL);
}
kmem_free(nm, MAXPATHLEN);
/* Check aggregated counts at each level */
for (pair = nvlist_next_nvpair(cnt_track, NULL);
pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
int error = 0;
char *name;
uint64_t cnt = 0;
dsl_dataset_t *ds;
name = nvpair_name(pair);
cnt = fnvpair_value_uint64(pair);
ASSERT(cnt > 0);
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
ddsa->ddsa_cr, ddsa->ddsa_proc);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL)
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
rv = error;
/* only report one error for this check */
break;
}
}
nvlist_free(cnt_track);
}
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
int error = 0;
dsl_dataset_t *ds;
char *name, *atp = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
if (error == 0) {
atp = strchr(name, '@');
if (atp == NULL)
error = SET_ERROR(EINVAL);
if (error == 0)
(void) strlcpy(dsname, name, atp - name + 1);
}
if (error == 0)
error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == 0) {
/* passing 0/NULL skips dsl_fs_ss_limit_check */
error = dsl_dataset_snapshot_check_impl(ds,
atp + 1, tx, B_FALSE, 0, NULL, NULL);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL) {
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
}
rv = error;
}
}
return (rv);
}
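/*
 * Syncing half of snapshot creation for one dataset: allocate the
 * snapshot's dsl_dataset_phys_t, copy the head's block pointer, space
 * accounting and deadlist object into it, splice it into the prev/next
 * snapshot chain (and next-clones list), give the head a fresh deadlist
 * keyed at the snapshot txg, and add the name to the snapnames ZAP.
 */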
void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj, crtxg;
objset_t *mos = dp->dp_meta_objset;
static zil_header_t zero_zil __maybe_unused;
objset_t *os __maybe_unused;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
/*
* If we are on an old pool, the zil must not be active, in which
* case it will be zeroed. Usually zil_suspend() accomplishes this.
*/
ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
dmu_objset_from_ds(ds, &os) != 0 ||
memcmp(&os->os_phys->os_zil_header, &zero_zil,
sizeof (zero_zil)) == 0);
/* Should not snapshot a dirty dataset. */
ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
ds, tx->tx_txg));
dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);
/*
* The origin's ds_creation_txg has to be < TXG_INITIAL
*/
if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
crtxg = 1;
else
crtxg = tx->tx_txg;
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsphys->ds_next_snap_obj = ds->ds_object;
dsphys->ds_num_children = 1;
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = crtxg;
dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(ds)->ds_uncompressed_bytes;
dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dmu_buf_rele(dbuf, FTAG);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, ds->ds_feature[f])) {
dsl_dataset_activate_feature(dsobj, f,
ds->ds_feature[f], tx);
}
}
ASSERT3U(ds->ds_prev != 0, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
if (ds->ds_prev) {
uint64_t next_clones_obj =
dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object ||
dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
} else if (next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds->ds_prev,
dsphys->ds_next_snap_obj, tx);
VERIFY0(zap_add_int(mos,
next_clones_obj, dsobj, tx));
}
}
/*
* If we have a reference-reservation on this dataset, we will
* need to increase the amount of refreservation being charged
* since our unique space is going to zero.
*/
if (ds->ds_reserved) {
int64_t delta;
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
ds->ds_reserved);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
delta, 0, 0, tx);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj =
dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_open(&ds->ds_deadlist, mos,
dsl_dataset_phys(ds)->ds_deadlist_obj);
dsl_deadlist_add_key(&ds->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
dsl_bookmark_snapshotted(ds, tx);
if (dsl_dataset_remap_deadlist_exists(ds)) {
uint64_t remap_deadlist_obj =
dsl_dataset_get_remap_deadlist_object(ds);
/*
* Move the remap_deadlist to the snapshot. The head
* will create a new remap deadlist on demand, from
* dsl_dataset_block_remapped().
*/
dsl_dataset_unset_remap_deadlist_object(ds, tx);
dsl_deadlist_close(&ds->ds_remap_deadlist);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
}
/*
* Create an ivset guid for this snapshot if the dataset is
* encrypted. This may be overridden by a raw receive. A
* previous implementation of this code did not have this
* field as part of the on-disk format for ZFS encryption
* (see errata #4). As part of the remediation for this
* issue, we ask the user to enable the bookmark_v2 feature
* which is now a dependency of the encryption feature. We
* use this as a heuristic to determine when the user has
* elected to correct any datasets created with the old code.
* As a result, we only do this step if the bookmark_v2
* feature is enabled, which limits the number of states a
* given pool / dataset can be in with regard to correcting
* the issue.
*/
if (ds->ds_dir->dd_crypto_obj != 0 &&
spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2)) {
uint64_t ivset_guid = unique_create();
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, DS_FIELD_IVSET_GUID,
sizeof (ivset_guid), 1, &ivset_guid, tx));
}
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
dsl_dataset_phys(ds)->ds_unique_bytes = 0;
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
snapname, 8, 1, &dsobj, tx));
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));
dsl_scan_ds_snapshotted(ds, tx);
dsl_dir_snap_cmtime_update(ds->ds_dir, tx);
if (zfs_snapshot_history_enabled)
spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, " ");
}
void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
dsl_dataset_t *ds;
char *name, *atp;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
atp = strchr(name, '@');
(void) strlcpy(dsname, name, atp - name + 1);
VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
if (ddsa->ddsa_props != NULL) {
dsl_props_set_sync_impl(ds->ds_prev,
ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
}
dsl_dataset_rele(ds, FTAG);
}
}
/*
* The snapshots must all be in the same pool.
* All-or-nothing: if there are any failures, nothing will be modified.
*/
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
dsl_dataset_snapshot_arg_t ddsa;
nvpair_t *pair;
boolean_t needsuspend;
int error;
spa_t *spa;
char *firstname;
nvlist_t *suspended = NULL;
pair = nvlist_next_nvpair(snaps, NULL);
if (pair == NULL)
return (0);
firstname = nvpair_name(pair);
error = spa_open(firstname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
suspended = fnvlist_alloc();
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *snapname = nvpair_name(pair);
char *atp;
void *cookie;
atp = strchr(snapname, '@');
if (atp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
(void) strlcpy(fsname, snapname, atp - snapname + 1);
error = zil_suspend(fsname, &cookie);
if (error != 0)
break;
fnvlist_add_uint64(suspended, fsname,
(uintptr_t)cookie);
}
}
ddsa.ddsa_snaps = snaps;
ddsa.ddsa_props = props;
ddsa.ddsa_errors = errors;
ddsa.ddsa_cr = CRED();
ddsa.ddsa_proc = curproc;
if (error == 0) {
error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
dsl_dataset_snapshot_sync, &ddsa,
fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
}
if (suspended != NULL) {
for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
pair = nvlist_next_nvpair(suspended, pair)) {
zil_resume((void *)(uintptr_t)
fnvpair_value_uint64(pair));
}
fnvlist_free(suspended);
}
if (error == 0) {
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
zvol_create_minor(nvpair_name(pair));
}
}
return (error);
}
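/*
 * Example (sketch; the names are illustrative only): to take two
 * snapshots atomically, a caller builds an nvlist keyed by full
 * snapshot names (the pair values are ignored):
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "tank/a@backup");
 *	fnvlist_add_boolean(snaps, "tank/b@backup");
 *	error = dsl_dataset_snapshot(snaps, NULL, NULL);
 *	fnvlist_free(snaps);
 */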
typedef struct dsl_dataset_snapshot_tmp_arg {
const char *ddsta_fsname;
const char *ddsta_snapname;
minor_t ddsta_cleanup_minor;
const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;
static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* NULL cred means no limit check for tmp snapshot */
error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
tx, B_FALSE, 0, NULL, NULL);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
B_TRUE, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag)
{
dsl_dataset_snapshot_tmp_arg_t ddsta;
int error;
spa_t *spa;
boolean_t needsuspend;
void *cookie;
ddsta.ddsta_fsname = fsname;
ddsta.ddsta_snapname = snapname;
ddsta.ddsta_cleanup_minor = cleanup_minor;
ddsta.ddsta_htag = htag;
error = spa_open(fsname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
error = zil_suspend(fsname, &cookie);
if (error != 0)
return (error);
}
error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);
if (needsuspend)
zil_resume(cookie);
return (error);
}
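/*
 * Write out one dirty dataset for this txg: persist any fsid change,
 * flush the resumable-receive cursor (object/offset/bytes) to the ZAP,
 * sync the objset itself, and activate any per-dataset features queued
 * in ds_feature_activation[].
 */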
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL);
ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
/*
* in case we had to change ds_fsid_guid when we opened it,
* sync it out now.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;
if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
&ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
&ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
&ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
}
dmu_objset_sync(ds->ds_objset, zio, tx);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, ds->ds_feature_activation[f])) {
if (zfeature_active(f, ds->ds_feature[f]))
continue;
dsl_dataset_activate_feature(ds->ds_object, f,
ds->ds_feature_activation[f], tx);
ds->ds_feature[f] = ds->ds_feature_activation[f];
}
}
}
/*
* Check if the percentage of blocks shared between the clone and the
* snapshot (as opposed to those that are clone-only) is below a certain
* threshold (zfs_livelist_min_percent_shared).
*/
static boolean_t
dsl_livelist_should_disable(dsl_dataset_t *ds)
{
uint64_t used, referenced;
int percent_shared;
used = dsl_dir_get_usedds(ds->ds_dir);
referenced = dsl_get_referenced(ds);
ASSERT3U(referenced, >=, 0);
ASSERT3U(used, >=, 0);
if (referenced == 0)
return (B_FALSE);
percent_shared = (100 * (referenced - used)) / referenced;
if (percent_shared <= zfs_livelist_min_percent_shared)
return (B_TRUE);
return (B_FALSE);
}
/*
* Check if it is possible to combine two livelist entries into one.
* This is the case if the combined number of 'live' blkptrs (ALLOCs that
* don't have a matching FREE) is under the maximum sublist size.
* We check this by subtracting twice the total number of frees from the total
* number of blkptrs. FREEs are counted twice because each FREE blkptr
* will cancel out an ALLOC blkptr when the livelist is processed.
*/
static boolean_t
dsl_livelist_should_condense(dsl_deadlist_entry_t *first,
dsl_deadlist_entry_t *next)
{
uint64_t total_free = first->dle_bpobj.bpo_phys->bpo_num_freed +
next->dle_bpobj.bpo_phys->bpo_num_freed;
uint64_t total_entries = first->dle_bpobj.bpo_phys->bpo_num_blkptrs +
next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
if ((total_entries - (2 * total_free)) < zfs_livelist_max_entries)
return (B_TRUE);
return (B_FALSE);
}
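/*
 * For example, if each of the two entries holds 1000 blkptrs of which
 * 400 are FREEs, then total_entries = 2000 and total_free = 800, so
 * 2000 - 2 * 800 = 400 live ALLOCs remain; the pair is condensed when
 * 400 < zfs_livelist_max_entries.
 */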
typedef struct try_condense_arg {
spa_t *spa;
dsl_dataset_t *ds;
} try_condense_arg_t;
/*
* Iterate over the livelist entries, searching for a pair to condense.
* A nonzero return value means stop, 0 means keep looking.
*/
static int
dsl_livelist_try_condense(void *arg, dsl_deadlist_entry_t *first)
{
try_condense_arg_t *tca = arg;
spa_t *spa = tca->spa;
dsl_dataset_t *ds = tca->ds;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
dsl_deadlist_entry_t *next;
/* The condense thread has not yet been created at import */
if (spa->spa_livelist_condense_zthr == NULL)
return (1);
/* A condense is already in progress */
if (spa->spa_to_condense.ds != NULL)
return (1);
next = AVL_NEXT(&ll->dl_tree, &first->dle_node);
/* The livelist has only one entry - don't condense it */
if (next == NULL)
return (1);
/* Next is the newest entry - don't condense it */
if (AVL_NEXT(&ll->dl_tree, &next->dle_node) == NULL)
return (1);
/* This pair is not ready to condense but keep looking */
if (!dsl_livelist_should_condense(first, next))
return (0);
/*
* Add a ref to prevent the dataset from being evicted while
* the condense zthr or synctask are running. Ref will be
* released at the end of the condense synctask
*/
dmu_buf_add_ref(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = ds;
spa->spa_to_condense.first = first;
spa->spa_to_condense.next = next;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
zthr_wakeup(spa->spa_livelist_condense_zthr);
return (1);
}
static void
dsl_flush_pending_livelist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_dir_t *dd = ds->ds_dir;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
dsl_deadlist_entry_t *last = dsl_deadlist_last(&dd->dd_livelist);
/* Check if we need to add a new sub-livelist */
if (last == NULL) {
/* The livelist is empty */
dsl_deadlist_add_key(&dd->dd_livelist,
tx->tx_txg - 1, tx);
} else if (spa_sync_pass(spa) == 1) {
/*
* Check if the newest entry is full. If it is, make a new one.
* We only do this once per sync because we could overfill a
* sublist in one sync pass and don't want to add another entry
* for a txg that is already represented. This ensures that
* blkptrs born in the same txg are stored in the same sublist.
*/
bpobj_t bpobj = last->dle_bpobj;
uint64_t all = bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t free = bpobj.bpo_phys->bpo_num_freed;
uint64_t alloc = all - free;
if (alloc > zfs_livelist_max_entries) {
dsl_deadlist_add_key(&dd->dd_livelist,
tx->tx_txg - 1, tx);
}
}
/* Insert each entry into the on-disk livelist */
bplist_iterate(&dd->dd_pending_allocs,
dsl_deadlist_insert_alloc_cb, &dd->dd_livelist, tx);
bplist_iterate(&dd->dd_pending_frees,
dsl_deadlist_insert_free_cb, &dd->dd_livelist, tx);
/* Attempt to condense every pair of adjacent entries */
try_condense_arg_t arg = {
.spa = spa,
.ds = ds
};
dsl_deadlist_iterate(&dd->dd_livelist, dsl_livelist_try_condense,
&arg);
}
void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os = ds->ds_objset;
bplist_iterate(&ds->ds_pending_deadlist,
dsl_deadlist_insert_alloc_cb, &ds->ds_deadlist, tx);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
dsl_flush_pending_livelist(ds, tx);
if (dsl_livelist_should_disable(ds)) {
dsl_dir_remove_livelist(ds->ds_dir, tx, B_TRUE);
}
}
dsl_bookmark_sync_done(ds, tx);
multilist_destroy(&os->os_synced_dnodes);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
else
ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]);
ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));
dmu_buf_rele(ds->ds_dbuf, ds);
}
int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
uint64_t count = 0;
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* There may be missing entries in ds_next_clones_obj
* due to a bug in a previous version of the code.
* Only trust it if it has the right number of entries.
*/
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
}
if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
return (SET_ERROR(ENOENT));
}
for (zap_cursor_init(&zc, mos,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *clone;
char buf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
za.za_first_integer, FTAG, &clone));
dsl_dir_name(clone->ds_dir, buf);
fnvlist_add_boolean(val, buf);
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(&zc);
return (0);
}
void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
nvlist_t *propval = fnvlist_alloc();
nvlist_t *val = fnvlist_alloc();
if (get_clones_stat_impl(ds, val) == 0) {
fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
propval);
}
nvlist_free(val);
nvlist_free(propval);
}
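/*
 * Build the receive resume token: gather the saved resume fields into an
 * nvlist, pack and gzip-compress it, then hex-encode the payload into a
 * "<version>-<fletcher4 word 0>-<packed size>-<hex payload>" string.
 */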
static char *
get_receive_resume_token_impl(dsl_dataset_t *ds)
{
if (!dsl_dataset_has_resume_receive_state(ds))
return (NULL);
dsl_pool_t *dp = ds->ds_dir->dd_pool;
char *str;
void *packed;
uint8_t *compressed;
uint64_t val;
nvlist_t *token_nv = fnvlist_alloc();
size_t packed_size, compressed_size;
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "fromguid", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "object", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "offset", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "bytes", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "toguid", val);
}
char buf[MAXNAMELEN];
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
fnvlist_add_string(token_nv, "toname", buf);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_LARGEBLOCK) == 0) {
fnvlist_add_boolean(token_nv, "largeblockok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_EMBEDOK) == 0) {
fnvlist_add_boolean(token_nv, "embedok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_COMPRESSOK) == 0) {
fnvlist_add_boolean(token_nv, "compressok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_RAWOK) == 0) {
fnvlist_add_boolean(token_nv, "rawok");
}
if (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t num_redact_snaps = 0;
uint64_t *redact_snaps = NULL;
VERIFY3B(dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_redact_snaps,
&redact_snaps), ==, B_TRUE);
fnvlist_add_uint64_array(token_nv, "redact_snaps",
redact_snaps, num_redact_snaps);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS) == 0) {
uint64_t num_redact_snaps = 0, int_size = 0;
uint64_t *redact_snaps = NULL;
VERIFY0(zap_length(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, &int_size,
&num_redact_snaps));
ASSERT3U(int_size, ==, sizeof (uint64_t));
redact_snaps = kmem_alloc(int_size * num_redact_snaps,
KM_SLEEP);
VERIFY0(zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, int_size,
num_redact_snaps, redact_snaps));
fnvlist_add_uint64_array(token_nv, "book_redact_snaps",
redact_snaps, num_redact_snaps);
kmem_free(redact_snaps, int_size * num_redact_snaps);
}
packed = fnvlist_pack(token_nv, &packed_size);
fnvlist_free(token_nv);
compressed = kmem_alloc(packed_size, KM_SLEEP);
compressed_size = gzip_compress(packed, compressed,
packed_size, packed_size, 6);
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, compressed_size, &cksum);
size_t alloc_size = compressed_size * 2 + 1;
str = kmem_alloc(alloc_size, KM_SLEEP);
for (int i = 0; i < compressed_size; i++) {
size_t offset = i * 2;
(void) snprintf(str + offset, alloc_size - offset,
"%02x", compressed[i]);
}
str[compressed_size * 2] = '\0';
char *propval = kmem_asprintf("%u-%llx-%llx-%s",
ZFS_SEND_RESUME_TOKEN_VERSION,
(longlong_t)cksum.zc_word[0],
(longlong_t)packed_size, str);
kmem_free(packed, packed_size);
kmem_free(str, alloc_size);
kmem_free(compressed, packed_size);
return (propval);
}
/*
* Returns a string that represents the receive resume state token. It should
* be freed with strfree(). NULL is returned if no resume state is present.
*/
char *
get_receive_resume_token(dsl_dataset_t *ds)
{
/*
* A failed "newfs" (e.g. full) resumable receive leaves
* the stats set on this dataset. Check here for the prop.
*/
char *token = get_receive_resume_token_impl(ds);
if (token != NULL)
return (token);
/*
* A failed incremental resumable receive leaves the
* stats set on our child named "%recv". Check the child
* for the prop.
*/
/* 6 extra bytes for /%recv */
char name[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, name);
if (strlcat(name, "/", sizeof (name)) < sizeof (name) &&
strlcat(name, recv_clone_name, sizeof (name)) < sizeof (name) &&
dsl_dataset_hold(ds->ds_dir->dd_pool, name, FTAG, &recv_ds) == 0) {
token = get_receive_resume_token_impl(recv_ds);
dsl_dataset_rele(recv_ds, FTAG);
}
return (token);
}
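/*
 * The ratio is expressed as a percentage (uncompressed / compressed * 100),
 * so a 3x compression ratio is returned as 300 and rendered by userland as
 * "3.00x". A dataset with no compressed bytes reports 100 (1.00x).
 */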
uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
(dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
dsl_dataset_phys(ds)->ds_compressed_bytes);
return (ratio);
}
uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}
uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_get_refratio(ds));
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_compressratio(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_dataset_phys(ds)->ds_unique_bytes);
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_used(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_time);
}
uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_txg);
}
uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
return (ds->ds_quota);
}
uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
return (ds->ds_reserved);
}
uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_guid);
}
uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_unique_bytes);
}
uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
return (ds->ds_object);
}
uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
return (ds->ds_userrefs);
}
uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}
uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}
uint64_t
dsl_get_numclones(dsl_dataset_t *ds)
{
ASSERT(ds->ds_is_snapshot);
return (dsl_dataset_phys(ds)->ds_num_children - 1);
}
uint64_t
dsl_get_inconsistent(dsl_dataset_t *ds)
{
return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
1 : 0);
}
uint64_t
dsl_get_redacted(dsl_dataset_t *ds)
{
return (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
}
uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
uint64_t refdbytes = dsl_get_referenced(ds);
uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
NULL, 0, TRUE);
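/*
 * The unconsumed part of the refreservation (reserved space not yet
 * backed by unique bytes) is set aside for this dataset, so count it
 * as available here.
 */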
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
availbytes +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
}
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (refdbytes < ds->ds_quota) {
availbytes = MIN(availbytes,
ds->ds_quota - refdbytes);
} else {
availbytes = 0;
}
}
return (availbytes);
}
int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_dataset_t *prev;
int err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err == 0) {
uint64_t comp, uncomp;
err = dsl_dataset_space_written(prev, ds, written,
&comp, &uncomp);
dsl_dataset_rele(prev, FTAG);
}
return (err);
}
/*
* 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
*/
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
dsl_dataset_name(ds->ds_prev, snap);
return (0);
} else {
return (SET_ERROR(ENOENT));
}
}
void
dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval)
{
uint64_t nsnaps;
uint64_t *snaps;
if (dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &nsnaps, &snaps)) {
fnvlist_add_uint64_array(propval, ZPROP_VALUE, snaps,
nsnaps);
}
}
/*
* Returns the mountpoint property and source for the given dataset in the value
* and source buffers. The value buffer must be at least as large as MAXPATHLEN
* and the source buffer at least as large as ZFS_MAX_DATASET_NAME_LEN.
* Returns 0 on success and an error on failure.
*/
int
dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source)
{
int error;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
/* Retrieve the mountpoint value stored in the zap object */
error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
ZAP_MAXVALUELEN, value, source);
if (error != 0) {
return (error);
}
/*
* Process the dsname and source to find the full mountpoint string.
* Can be skipped for 'legacy' or 'none'.
*/
if (value[0] == '/') {
char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
ASSERT0(strncmp(dsname, source, strlen(source)));
relpath = dsname + strlen(source);
if (relpath[0] == '/')
relpath++;
}
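/*
 * relpath now holds the portion of dsname below the dataset the mountpoint
 * value came from; e.g. (illustrative names) for dsname "pool/home/user"
 * inheriting its mountpoint from "pool/home", relpath is "user" and is
 * appended to the inherited mountpoint below.
 */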
spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
char *mnt = value;
if (value[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
mnt = value + 1;
if (relpath[0] == '\0') {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
root, mnt);
} else {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
root, mnt, relpath[0] == '@' ? "" : "/",
relpath);
}
kmem_free(buf, ZAP_MAXVALUELEN);
}
return (0);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
dsl_get_refratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
dsl_get_logicalreferenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
dsl_get_compressratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
dsl_get_used(ds));
if (ds->ds_is_snapshot) {
get_clones_stat(ds, nv);
} else {
char buf[ZFS_MAX_DATASET_NAME_LEN];
if (dsl_get_prev_snap(ds, buf) == 0)
dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
buf);
dsl_dir_stats(ds->ds_dir, nv);
}
nvlist_t *propval = fnvlist_alloc();
dsl_get_redact_snaps(ds, propval);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
propval);
nvlist_free(propval);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
dsl_get_available(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
dsl_get_referenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
dsl_get_creation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
dsl_get_creationtxg(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
dsl_get_refquota(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
dsl_get_refreservation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
dsl_get_guid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
dsl_get_unique(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
dsl_get_objsetid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
dsl_get_userrefs(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
dsl_get_defer_destroy(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOTS_CHANGED,
dsl_dir_snap_cmtime(ds->ds_dir).tv_sec);
dsl_dataset_crypt_stats(ds, nv);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
uint64_t written;
if (dsl_get_written(ds, &written) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
written);
}
}
if (!dsl_dataset_is_snapshot(ds)) {
char *token = get_receive_resume_token(ds);
if (token != NULL) {
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_RECEIVE_RESUME_TOKEN, token);
kmem_strfree(token);
}
}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
stat->dds_creation_txg = dsl_get_creationtxg(ds);
stat->dds_inconsistent = dsl_get_inconsistent(ds);
stat->dds_guid = dsl_get_guid(ds);
stat->dds_redacted = dsl_get_redacted(ds);
stat->dds_origin[0] = '\0';
if (ds->ds_is_snapshot) {
stat->dds_is_snapshot = B_TRUE;
stat->dds_num_clones = dsl_get_numclones(ds);
} else {
stat->dds_is_snapshot = B_FALSE;
stat->dds_num_clones = 0;
if (dsl_dir_is_clone(ds->ds_dir)) {
dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
}
}
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
*refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
*availbytesp +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (*refdbytesp < ds->ds_quota)
*availbytesp = MIN(*availbytesp,
ds->ds_quota - *refdbytesp);
else
*availbytesp = 0;
}
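/*
 * The fill count of the objset's root block pointer is the number of
 * objects currently in use; anything up to DN_MAX_OBJECT could still be
 * allocated.
 */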
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
*usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
boolean_t
dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
uint64_t birth;
ASSERT(dsl_pool_config_held(dp));
if (snap == NULL)
return (B_FALSE);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
birth = dsl_dataset_get_blkptr(ds)->blk_birth;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
objset_t *os, *os_snap;
/*
* It may be that only the ZIL differs, because it was
* reset in the head. Don't count that as being
* modified.
*/
if (dmu_objset_from_ds(ds, &os) != 0)
return (B_TRUE);
if (dmu_objset_from_ds(snap, &os_snap) != 0)
return (B_TRUE);
return (memcmp(&os->os_phys->os_meta_dnode,
&os_snap->os_phys->os_meta_dnode,
sizeof (os->os_phys->os_meta_dnode)) != 0);
}
return (B_FALSE);
}
-typedef struct dsl_dataset_rename_snapshot_arg {
- const char *ddrsa_fsname;
- const char *ddrsa_oldsnapname;
- const char *ddrsa_newsnapname;
- boolean_t ddrsa_recursive;
- dmu_tx_t *ddrsa_tx;
-} dsl_dataset_rename_snapshot_arg_t;
-
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
(void) dp;
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
int error;
uint64_t val;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
if (error != 0) {
/* ignore nonexistent snapshots */
return (error == ENOENT ? 0 : error);
}
/* new name should not exist */
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
if (error == 0)
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(hds->ds_dir) + 1 +
strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
return (error);
}
-static int
+int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
int error;
error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
if (error != 0)
return (error);
if (ddrsa->ddrsa_recursive) {
error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_check_impl, ddrsa,
DS_FIND_CHILDREN);
} else {
error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
}
dsl_dataset_rele(hds, FTAG);
return (error);
}
static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_dataset_t *ds;
uint64_t val;
dmu_tx_t *tx = ddrsa->ddrsa_tx;
int error;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT) {
/* ignore nonexistent snapshots */
return (0);
}
VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));
/* log before we change the name */
spa_history_log_internal_ds(ds, "rename", tx,
"-> @%s", ddrsa->ddrsa_newsnapname);
VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
B_FALSE));
mutex_enter(&ds->ds_lock);
(void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
sizeof (ds->ds_snapname));
mutex_exit(&ds->ds_lock);
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj,
ds->ds_snapname, 8, 1, &ds->ds_object, tx));
zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
ddrsa->ddrsa_newsnapname, B_TRUE);
dsl_dataset_rele(ds, FTAG);
return (0);
}
-static void
+void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
ddrsa->ddrsa_tx = tx;
if (ddrsa->ddrsa_recursive) {
VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_sync_impl, ddrsa,
DS_FIND_CHILDREN));
} else {
VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
}
dsl_dataset_rele(hds, FTAG);
}
int
dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
dsl_dataset_rename_snapshot_arg_t ddrsa;
ddrsa.ddrsa_fsname = fsname;
ddrsa.ddrsa_oldsnapname = oldsnapname;
ddrsa.ddrsa_newsnapname = newsnapname;
ddrsa.ddrsa_recursive = recursive;
return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
dsl_dataset_rename_snapshot_sync, &ddrsa,
1, ZFS_SPACE_CHECK_RESERVED));
}
/*
* If we're doing an ownership handoff, we need to make sure that there is
* only one long hold on the dataset. We're not allowed to change anything here
* so we don't permanently release the long hold or regular hold here. We want
* to do this only when syncing to avoid the dataset unexpectedly going away
* when we release the long hold.
*/
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
boolean_t held = B_FALSE;
if (!dmu_tx_is_syncing(tx))
return (0);
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_activity_lock);
uint64_t holds = zfs_refcount_count(&ds->ds_longholds) -
(owner != NULL ? 1 : 0);
/*
* The value of dd_activity_waiters can change as soon as we drop the
* lock, but we're fine with that; new waiters coming in or old
* waiters leaving doesn't cause problems, since we're going to cancel
* waiters later anyway. The goal of this check is to verify that no
* non-waiters have long-holds, and all new long-holds will be
* prevented because we're holding the pool config as writer.
*/
if (holds != dd->dd_activity_waiters)
held = B_TRUE;
mutex_exit(&dd->dd_activity_lock);
if (held)
return (SET_ERROR(EBUSY));
return (0);
}
int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int64_t unused_refres_delta;
int error;
error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* must not be a snapshot */
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/* must have a most recent snapshot */
if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ESRCH));
}
/*
* No rollback to a snapshot created in the current txg, because
* the rollback may dirty the dataset and create blocks that are
* not reachable from the rootbp while having a birth txg that
* falls into the snapshot's range.
*/
if (dmu_tx_is_syncing(tx) &&
dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EAGAIN));
}
/*
* If the expected target snapshot is specified, then check that
* the latest snapshot is it.
*/
if (ddra->ddra_tosnap != NULL) {
dsl_dataset_t *snapds;
/* Check if the target snapshot exists at all. */
error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
if (error != 0) {
/*
* ESRCH is used to signal that the target snapshot does
* not exist, while ENOENT is used to report that
* the rolled back dataset does not exist.
* ESRCH is also used to cover other cases where the
* target snapshot is not related to the dataset being
* rolled back such as being in a different pool.
*/
if (error == ENOENT || error == EXDEV)
error = SET_ERROR(ESRCH);
dsl_dataset_rele(ds, FTAG);
return (error);
}
ASSERT(snapds->ds_is_snapshot);
/* Check if the snapshot is the latest snapshot indeed. */
if (snapds != ds->ds_prev) {
/*
* Distinguish between the case where the only problem
* is intervening snapshots (EEXIST) vs the snapshot
* not being a valid target for rollback (ESRCH).
*/
if (snapds->ds_dir == ds->ds_dir ||
(dsl_dir_is_clone(ds->ds_dir) &&
dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
snapds->ds_object)) {
error = SET_ERROR(EEXIST);
} else {
error = SET_ERROR(ESRCH);
}
dsl_dataset_rele(snapds, FTAG);
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(snapds, FTAG);
}
/* must not have any bookmarks after the most recent snapshot */
if (dsl_bookmark_latest_txg(ds) >
dsl_dataset_phys(ds)->ds_prev_snap_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* Check if the snap we are rolling back to uses more than
* the refquota.
*/
if (ds->ds_quota != 0 &&
dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* When we do the clone swap, we will temporarily use more space
* due to the refreservation (the head will no longer have any
* unique space, so the entire amount of the refreservation will need
* to be free). We will immediately destroy the clone, freeing
* this space, but the freeing happens over many txg's.
*/
unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
dsl_dataset_phys(ds)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds, *clone;
uint64_t cloneobj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));
dsl_dataset_name(ds->ds_prev, namebuf);
fnvlist_add_string(ddra->ddra_result, "target", namebuf);
cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));
dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
dsl_dataset_zero_zil(ds, tx);
dsl_destroy_head_sync_impl(clone, tx);
dsl_dataset_rele(clone, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* Rolls back the given filesystem or volume to the most recent snapshot.
* The name of the most recent snapshot will be returned under key "target"
* in the result nvlist.
*
* If owner != NULL:
* - The existing dataset MUST be owned by the specified owner at entry
* - Upon return, dataset will still be held by the same owner, whether we
* succeed or not.
*
* This mode is required any time the existing filesystem is mounted. See
* notes above zfs_suspend_fs() for further details.
*/
int
dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result)
{
dsl_dataset_rollback_arg_t ddra;
ddra.ddra_fsname = fsname;
ddra.ddra_tosnap = tosnap;
ddra.ddra_owner = owner;
ddra.ddra_result = result;
return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
dsl_dataset_rollback_sync, &ddra,
1, ZFS_SPACE_CHECK_RESERVED));
}
struct promotenode {
list_node_t link;
dsl_dataset_t *ds;
};
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
const void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag);
int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds, *origin_head;
int err;
uint64_t unused;
uint64_t ss_mv_cnt;
size_t max_snap_len;
boolean_t conflicting_snaps;
err = promote_hold(ddpa, dp, FTAG);
if (err != 0)
return (err);
hds = ddpa->ddpa_clone;
max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;
if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
promote_rele(ddpa, FTAG);
return (SET_ERROR(EXDEV));
}
snap = list_head(&ddpa->shared_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
origin_head = snap->ds;
origin_ds = snap->ds;
/*
* Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
* When doing a promote we must make sure the encryption root for
* both the target and the target's origin does not change to avoid
* needing to rewrap encryption keys
*/
err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
if (err != 0)
goto out;
/*
* Compute and check the amount of space to transfer. Since this is
* so expensive, don't do the preliminary check.
*/
if (!dmu_tx_is_syncing(tx)) {
promote_rele(ddpa, FTAG);
return (0);
}
/* compute origin's new unique space */
snap = list_tail(&ddpa->clone_snaps);
ASSERT(snap != NULL);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
&ddpa->unique, &unused, &unused);
/*
* Walk the snapshots that we are moving
*
* Compute space to transfer. Consider the incremental changes
* to used by each snapshot:
* (my used) = (prev's used) + (blocks born) - (blocks killed)
* So each snapshot gave birth to:
* (blocks born) = (my used) - (prev's used) + (blocks killed)
* So a sequence would look like:
* (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
* Which simplifies to:
* uN + kN + kN-1 + ... + k1 + k0
* Note however, if we stop before we reach the ORIGIN we get:
* uN + kN + kN-1 + ... + kM - uM-1
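*
* Illustrative example (made-up numbers): if the origin snapshot
* references 5G (uN) and the deadlists of the snapshots being moved
* hold a total of 3G of killed blocks, the space transferred is
* 5G + 3G = 8G.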
*/
conflicting_snaps = B_FALSE;
ss_mv_cnt = 0;
ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
uint64_t val, dlused, dlcomp, dluncomp;
dsl_dataset_t *ds = snap->ds;
ss_mv_cnt++;
/*
* If there are long holds, we won't be able to evict
* the objset.
*/
if (dsl_dataset_long_held(ds)) {
err = SET_ERROR(EBUSY);
goto out;
}
/* Check that the snapshot name does not conflict */
VERIFY0(dsl_dataset_get_snapname(ds));
if (strlen(ds->ds_snapname) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds,
snap->ds->ds_snapname);
conflicting_snaps = B_TRUE;
} else if (err != ENOENT) {
goto out;
}
/* The very first snapshot does not have a deadlist */
if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
continue;
dsl_deadlist_space(&ds->ds_deadlist,
&dlused, &dlcomp, &dluncomp);
ddpa->used += dlused;
ddpa->comp += dlcomp;
ddpa->uncomp += dluncomp;
}
/*
* Check that bookmarks that are being transferred don't have
* name conflicts.
*/
for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
dsl_dataset_phys(origin_ds)->ds_creation_txg;
dbn = AVL_NEXT(&origin_head->ds_bookmarks, dbn)) {
if (strlen(dbn->dbn_name) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
zfs_bookmark_phys_t bm;
err = dsl_bookmark_lookup_impl(ddpa->ddpa_clone,
dbn->dbn_name, &bm);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds, dbn->dbn_name);
conflicting_snaps = B_TRUE;
} else if (err == ESRCH) {
err = 0;
} else if (err != 0) {
goto out;
}
}
/*
* In order to return the full list of conflicting snapshots, we check
* whether there was a conflict after traversing all of them.
*/
if (conflicting_snaps) {
err = SET_ERROR(EEXIST);
goto out;
}
/*
* If we are a clone of a clone then we never reached ORIGIN,
* so we need to subtract out the clone origin's used space.
*/
if (ddpa->origin_origin) {
ddpa->used -=
dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
ddpa->comp -=
dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
ddpa->uncomp -=
dsl_dataset_phys(ddpa->origin_origin)->
ds_uncompressed_bytes;
}
/* Check that there is enough space and limit headroom here */
err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
0, ss_mv_cnt, ddpa->used, ddpa->cr, ddpa->proc);
if (err != 0)
goto out;
/*
* Compute the amounts of space that will be used by snapshots
* after the promotion (for both origin and clone). For each,
* it is the amount of space that will be on all of their
* deadlists (that was not born before their new origin).
*/
if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
uint64_t space;
/*
* Note, typically this will not be a clone of a clone,
* so dd_origin_txg will be < TXG_INITIAL, so
* these snaplist_space() -> dsl_deadlist_space_range()
* calls will be fast because they do not have to
* iterate over all bps.
*/
snap = list_head(&ddpa->origin_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
err = snaplist_space(&ddpa->shared_snaps,
snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
if (err != 0)
goto out;
err = snaplist_space(&ddpa->clone_snaps,
snap->ds->ds_dir->dd_origin_txg, &space);
if (err != 0)
goto out;
ddpa->cloneusedsnap += space;
}
if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
DD_FLAG_USED_BREAKDOWN) {
err = snaplist_space(&ddpa->origin_snaps,
dsl_dataset_phys(origin_ds)->ds_creation_txg,
&ddpa->originusedsnap);
if (err != 0)
goto out;
}
out:
promote_rele(ddpa, FTAG);
return (err);
}
void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds;
dsl_dataset_t *origin_head;
dsl_dir_t *dd;
dsl_dir_t *odd = NULL;
uint64_t oldnext_obj;
int64_t delta;
ASSERT(nvlist_empty(ddpa->err_ds));
VERIFY0(promote_hold(ddpa, dp, FTAG));
hds = ddpa->ddpa_clone;
ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);
snap = list_head(&ddpa->shared_snaps);
origin_ds = snap->ds;
dd = hds->ds_dir;
snap = list_head(&ddpa->origin_snaps);
origin_head = snap->ds;
/*
* We need to explicitly open odd, since origin_ds's dd will be
* changing.
*/
VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
NULL, FTAG, &odd));
dsl_dataset_promote_crypt_sync(hds->ds_dir, odd, tx);
/* change origin's next snap */
dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
snap = list_tail(&ddpa->clone_snaps);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;
/* change the origin's next clone */
if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
dsl_dataset_remove_from_next_clones(origin_ds,
snap->ds->ds_object, tx);
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
oldnext_obj, tx));
}
/* change origin */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
dmu_buf_will_dirty(odd->dd_dbuf, tx);
dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
origin_head->ds_dir->dd_origin_txg =
dsl_dataset_phys(origin_ds)->ds_creation_txg;
/* change dd_clone entries */
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
hds->ds_object, tx));
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
origin_head->ds_object, tx));
if (dsl_dir_phys(dd)->dd_clones == 0) {
dsl_dir_phys(dd)->dd_clones =
zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
}
/*
* Move bookmarks to this dir.
*/
dsl_bookmark_node_t *dbn_next;
for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
dsl_dataset_phys(origin_ds)->ds_creation_txg;
dbn = dbn_next) {
dbn_next = AVL_NEXT(&origin_head->ds_bookmarks, dbn);
avl_remove(&origin_head->ds_bookmarks, dbn);
VERIFY0(zap_remove(dp->dp_meta_objset,
origin_head->ds_bookmarks_obj, dbn->dbn_name, tx));
dsl_bookmark_node_add(hds, dbn, tx);
}
dsl_bookmark_next_changed(hds, origin_ds, tx);
/* move snapshots to this dir */
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
dsl_dataset_t *ds = snap->ds;
/*
* Property callbacks are registered to a particular
* dsl_dir. Since ours is changing, evict the objset
* so that they will be unregistered from the old dsl_dir.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* move snap name entry */
VERIFY0(dsl_dataset_get_snapname(ds));
VERIFY0(dsl_dataset_snap_remove(origin_head,
ds->ds_snapname, tx, B_TRUE));
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
8, 1, &ds->ds_object, tx));
dsl_fs_ss_count_adjust(hds->ds_dir, 1,
DD_FIELD_SNAPSHOT_COUNT, tx);
/* change containing dsl_dir */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
ASSERT3P(ds->ds_dir, ==, odd);
dsl_dir_rele(ds->ds_dir, ds);
VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
NULL, ds, &ds->ds_dir));
/* move any clone references */
if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *cnds;
uint64_t o;
if (za.za_first_integer == oldnext_obj) {
/*
* We've already moved the
* origin's reference.
*/
continue;
}
VERIFY0(dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &cnds));
o = dsl_dir_phys(cnds->ds_dir)->
dd_head_dataset_obj;
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, o, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, o, tx));
dsl_dataset_rele(cnds, FTAG);
}
zap_cursor_fini(&zc);
}
ASSERT(!dsl_prop_hascb(ds));
}
/*
* Change space accounting.
* Note, ddpa->*usedsnap and dd_used_breakdown[SNAP] will either
* both be valid, or both be 0 (resulting in delta == 0). This
* is true for each of {clone,origin} independently.
*/
delta = ddpa->cloneusedsnap -
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, >=, 0);
ASSERT3U(ddpa->used, >=, delta);
dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(dd, DD_USED_HEAD,
ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);
delta = ddpa->originusedsnap -
dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, <=, 0);
ASSERT3U(ddpa->used, >=, -delta);
dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(odd, DD_USED_HEAD,
-ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);
dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;
/*
* Since livelists are specific to a clone's origin txg, they
* are no longer accurate. Destroy the livelist from the clone being
* promoted. If the origin dataset is a clone, destroy its livelist
* as well.
*/
dsl_dir_remove_livelist(dd, tx, B_TRUE);
dsl_dir_remove_livelist(odd, tx, B_TRUE);
/* log history record */
spa_history_log_internal_ds(hds, "promote", tx, " ");
dsl_dir_rele(odd, FTAG);
promote_rele(ddpa, FTAG);
/*
* Transfer common error blocks from old head to new head.
*/
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_HEAD_ERRLOG)) {
uint64_t old_head = origin_head->ds_object;
uint64_t new_head = hds->ds_object;
spa_swap_errlog(dp->dp_spa, new_head, old_head, tx);
}
}
/*
* Make a list of dsl_dataset_t's for the snapshots between first_obj
* (exclusive) and last_obj (inclusive). The list will be in reverse
* order (last_obj will be the list_head()). If first_obj == 0, do all
* snapshots back to this dataset's origin.
*/
static int
snaplist_make(dsl_pool_t *dp,
uint64_t first_obj, uint64_t last_obj, list_t *l, const void *tag)
{
uint64_t obj = last_obj;
list_create(l, sizeof (struct promotenode),
offsetof(struct promotenode, link));
while (obj != first_obj) {
dsl_dataset_t *ds;
struct promotenode *snap;
int err;
err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
ASSERT(err != ENOENT);
if (err != 0)
return (err);
if (first_obj == 0)
first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;
snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
snap->ds = ds;
list_insert_tail(l, snap);
obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
}
return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
struct promotenode *snap;
*spacep = 0;
for (snap = list_head(l); snap; snap = list_next(l, snap)) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
mintxg, UINT64_MAX, &used, &comp, &uncomp);
*spacep += used;
}
return (0);
}
static void
snaplist_destroy(list_t *l, const void *tag)
{
struct promotenode *snap;
if (l == NULL || !list_link_active(&l->list_head))
return;
while ((snap = list_tail(l)) != NULL) {
list_remove(l, snap);
dsl_dataset_rele(snap->ds, tag);
kmem_free(snap, sizeof (*snap));
}
list_destroy(l);
}
static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, const void *tag)
{
int error;
dsl_dir_t *dd;
struct promotenode *snap;
error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
&ddpa->ddpa_clone);
if (error != 0)
return (error);
dd = ddpa->ddpa_clone->ds_dir;
if (ddpa->ddpa_clone->ds_is_snapshot ||
!dsl_dir_is_clone(dd)) {
dsl_dataset_rele(ddpa->ddpa_clone, tag);
return (SET_ERROR(EINVAL));
}
error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
&ddpa->shared_snaps, tag);
if (error != 0)
goto out;
error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
&ddpa->clone_snaps, tag);
if (error != 0)
goto out;
snap = list_head(&ddpa->shared_snaps);
ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
&ddpa->origin_snaps, tag);
if (error != 0)
goto out;
if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
tag, &ddpa->origin_origin);
if (error != 0)
goto out;
}
out:
if (error != 0)
promote_rele(ddpa, tag);
return (error);
}
static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag)
{
snaplist_destroy(&ddpa->shared_snaps, tag);
snaplist_destroy(&ddpa->clone_snaps, tag);
snaplist_destroy(&ddpa->origin_snaps, tag);
if (ddpa->origin_origin != NULL)
dsl_dataset_rele(ddpa->origin_origin, tag);
dsl_dataset_rele(ddpa->ddpa_clone, tag);
}
/*
* Promote a clone.
*
* If it fails due to a conflicting snapshot name, "conflsnap" will be filled
* in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
*/
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
dsl_dataset_promote_arg_t ddpa = { 0 };
uint64_t numsnaps;
int error;
nvpair_t *snap_pair;
objset_t *os;
/*
* We will modify space proportional to the number of
* snapshots. Compute numsnaps.
*/
error = dmu_objset_hold(name, FTAG, &os);
if (error != 0)
return (error);
error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
&numsnaps);
dmu_objset_rele(os, FTAG);
if (error != 0)
return (error);
ddpa.ddpa_clonename = name;
ddpa.err_ds = fnvlist_alloc();
ddpa.cr = CRED();
ddpa.proc = curproc;
error = dsl_sync_task(name, dsl_dataset_promote_check,
dsl_dataset_promote_sync, &ddpa,
2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);
/*
* Return the first conflicting snapshot found.
*/
snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
if (snap_pair != NULL && conflsnap != NULL)
(void) strlcpy(conflsnap, nvpair_name(snap_pair),
ZFS_MAX_DATASET_NAME_LEN);
fnvlist_free(ddpa.err_ds);
return (error);
}
int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
/*
* "slack" factor for received datasets with refquota set on them.
* See the bottom of this function for details on its use.
*/
uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
spa_asize_inflation;
int64_t unused_refres_delta;
/* they should both be heads */
if (clone->ds_is_snapshot ||
origin_head->ds_is_snapshot)
return (SET_ERROR(EINVAL));
/* if we are not forcing, the branch point should be just before them */
if (!force && clone->ds_prev != origin_head->ds_prev)
return (SET_ERROR(EINVAL));
/* clone should be the clone (unless they are unrelated) */
if (clone->ds_prev != NULL &&
clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
origin_head->ds_dir != clone->ds_prev->ds_dir)
return (SET_ERROR(EINVAL));
/* the clone should be a child of the origin */
if (clone->ds_dir->dd_parent != origin_head->ds_dir)
return (SET_ERROR(EINVAL));
/* origin_head shouldn't be modified unless 'force' */
if (!force &&
dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
return (SET_ERROR(ETXTBSY));
/* origin_head should have no long holds (e.g. is not mounted) */
if (dsl_dataset_handoff_check(origin_head, owner, tx))
return (SET_ERROR(EBUSY));
/* check amount of any unconsumed refreservation */
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* The clone can't be too much over the head's refquota.
*
* To ensure that the entire refquota can be used, we allow one
* transaction to exceed the refquota. Therefore, this check
* needs to also allow for the space referenced to be more than the
* refquota. The maximum amount of space that one transaction can use
* on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
* overage ensures that we are able to receive a filesystem that
* exceeds the refquota on the source system.
*
* So that overage is the refquota_slack we use below.
*/
if (origin_head->ds_quota != 0 &&
dsl_dataset_phys(clone)->ds_referenced_bytes >
origin_head->ds_quota + refquota_slack)
return (SET_ERROR(EDQUOT));
return (0);
}
static void
dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
dsl_dataset_t *origin, dmu_tx_t *tx)
{
uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
dsl_pool_t *dp = dmu_tx_pool(tx);
ASSERT(dsl_pool_sync_context(dp));
clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);
if (clone_remap_dl_obj != 0) {
dsl_deadlist_close(&clone->ds_remap_deadlist);
dsl_dataset_unset_remap_deadlist_object(clone, tx);
}
if (origin_remap_dl_obj != 0) {
dsl_deadlist_close(&origin->ds_remap_deadlist);
dsl_dataset_unset_remap_deadlist_object(origin, tx);
}
if (clone_remap_dl_obj != 0) {
dsl_dataset_set_remap_deadlist_object(origin,
clone_remap_dl_obj, tx);
dsl_deadlist_open(&origin->ds_remap_deadlist,
dp->dp_meta_objset, clone_remap_dl_obj);
}
if (origin_remap_dl_obj != 0) {
dsl_dataset_set_remap_deadlist_object(clone,
origin_remap_dl_obj, tx);
dsl_deadlist_open(&clone->ds_remap_deadlist,
dp->dp_meta_objset, origin_remap_dl_obj);
}
}
void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta;
ASSERT(clone->ds_reserved == 0);
/*
* NOTE: On DEBUG kernels there could be a race between this and
* the check function if spa_asize_inflation is adjusted...
*/
ASSERT(origin_head->ds_quota == 0 ||
dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
DMU_MAX_ACCESS * spa_asize_inflation);
ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
dsl_dir_cancel_waiters(origin_head->ds_dir);
/*
* Swap per-dataset feature flags.
*/
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
ASSERT(!dsl_dataset_feature_is_active(clone, f));
ASSERT(!dsl_dataset_feature_is_active(origin_head, f));
continue;
}
boolean_t clone_inuse = dsl_dataset_feature_is_active(clone, f);
void *clone_feature = clone->ds_feature[f];
boolean_t origin_head_inuse =
dsl_dataset_feature_is_active(origin_head, f);
void *origin_head_feature = origin_head->ds_feature[f];
if (clone_inuse)
dsl_dataset_deactivate_feature_impl(clone, f, tx);
if (origin_head_inuse)
dsl_dataset_deactivate_feature_impl(origin_head, f, tx);
if (clone_inuse) {
dsl_dataset_activate_feature(origin_head->ds_object, f,
clone_feature, tx);
origin_head->ds_feature[f] = clone_feature;
}
if (origin_head_inuse) {
dsl_dataset_activate_feature(clone->ds_object, f,
origin_head_feature, tx);
clone->ds_feature[f] = origin_head_feature;
}
}
dmu_buf_will_dirty(clone->ds_dbuf, tx);
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
if (clone->ds_objset != NULL) {
dmu_objset_evict(clone->ds_objset);
clone->ds_objset = NULL;
}
if (origin_head->ds_objset != NULL) {
dmu_objset_evict(origin_head->ds_objset);
origin_head->ds_objset = NULL;
}
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
/*
* Reset origin's unique bytes.
*/
{
dsl_dataset_t *origin = clone->ds_prev;
uint64_t comp, uncomp;
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_deadlist_space_range(&clone->ds_deadlist,
dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
&dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
}
/* swap blkptrs */
{
rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
blkptr_t tmp;
tmp = dsl_dataset_phys(origin_head)->ds_bp;
dsl_dataset_phys(origin_head)->ds_bp =
dsl_dataset_phys(clone)->ds_bp;
dsl_dataset_phys(clone)->ds_bp = tmp;
rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
rrw_exit(&clone->ds_bp_rwlock, FTAG);
}
/* set dd_*_bytes */
{
int64_t dused, dcomp, duncomp;
uint64_t cdl_used, cdl_comp, cdl_uncomp;
uint64_t odl_used, odl_comp, odl_uncomp;
ASSERT3U(dsl_dir_phys(clone->ds_dir)->
dd_used_breakdown[DD_USED_SNAP], ==, 0);
dsl_deadlist_space(&clone->ds_deadlist,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space(&origin_head->ds_deadlist,
&odl_used, &odl_comp, &odl_uncomp);
dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
cdl_used -
(dsl_dataset_phys(origin_head)->ds_referenced_bytes +
odl_used);
dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
cdl_comp -
(dsl_dataset_phys(origin_head)->ds_compressed_bytes +
odl_comp);
duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
cdl_uncomp -
(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
odl_uncomp);
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
dused, dcomp, duncomp, tx);
dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
-dused, -dcomp, -duncomp, tx);
/*
* The difference in the space used by snapshots is the
* difference in snapshot space due to the head's
* deadlist (since that's the only thing that's
* changing that affects the snapused).
*/
dsl_deadlist_space_range(&clone->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space_range(&origin_head->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&odl_used, &odl_comp, &odl_uncomp);
dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
/* swap ds_*_bytes */
SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
dsl_dataset_phys(clone)->ds_referenced_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
dsl_dataset_phys(clone)->ds_compressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
dsl_dataset_phys(clone)->ds_uncompressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
dsl_dataset_phys(clone)->ds_unique_bytes);
/* apply any parent delta for change in unconsumed refreservation */
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
unused_refres_delta, 0, 0, tx);
/*
* Swap deadlists.
*/
dsl_deadlist_close(&clone->ds_deadlist);
dsl_deadlist_close(&origin_head->ds_deadlist);
SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(origin_head)->ds_deadlist_obj);
dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);
/*
* If there is a bookmark at the origin, its "next dataset" is
* changing, so we need to reset its FBN.
*/
dsl_bookmark_next_changed(origin_head, origin_head->ds_prev, tx);
dsl_scan_ds_clone_swapped(origin_head, clone, tx);
/*
* Destroy any livelists associated with the clone or the origin,
* since after the swap the corresponding livelists are no longer
* valid.
*/
dsl_dir_remove_livelist(clone->ds_dir, tx, B_TRUE);
dsl_dir_remove_livelist(origin_head->ds_dir, tx, B_TRUE);
spa_history_log_internal_ds(clone, "clone swap", tx,
"parent=%s", origin_head->ds_dir->dd_myname);
}
/*
* Given a pool name and a dataset object number in that pool,
* return the name of that dataset.
*/
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int error;
error = dsl_pool_hold(pname, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
if (error == 0) {
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
dsl_pool_rele(dp, FTAG);
return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
int error = 0;
ASSERT3S(asize, >, 0);
/*
* *ref_rsrv is the portion of asize that will come from any
* unconsumed refreservation space.
*/
*ref_rsrv = 0;
mutex_enter(&ds->ds_lock);
/*
* Make a space adjustment for reserved bytes.
*/
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
ASSERT3U(*used, >=,
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*used -=
(ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*ref_rsrv =
asize - MIN(asize, parent_delta(ds, asize + inflight));
}
if (!check_quota || ds->ds_quota == 0) {
mutex_exit(&ds->ds_lock);
return (0);
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes (which
* may free up space for us).
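* ERESTART asks the caller to wait for pending frees and retry, while
* EDQUOT means the on-disk referenced space already exceeds the quota
* with nothing in flight.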
*/
if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
ds->ds_quota) {
if (inflight > 0 ||
dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
error = SET_ERROR(ERESTART);
else
error = SET_ERROR(EDQUOT);
}
mutex_exit(&ds->ds_lock);
return (error);
}
typedef struct dsl_dataset_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
newval < ds->ds_reserved) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));
if (ds->ds_quota != newval) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_quota = newval;
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t refquota)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refquota;
return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
dsl_dataset_set_refquota_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval, unique;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_lock);
if (!DS_UNIQUE_IS_ACCURATE(ds))
dsl_dataset_recalc_head_uniq(ds);
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
mutex_exit(&ds->ds_lock);
if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
uint64_t delta = MAX(unique, newval) -
MAX(unique, ds->ds_reserved);
if (delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
(ds->ds_quota > 0 && newval > ds->ds_quota)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
uint64_t newval;
uint64_t unique;
int64_t delta;
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
source, sizeof (value), 1, &value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
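/*
 * Only the unconsumed portion of the refreservation (the amount by which
 * the reservation exceeds unique bytes) is charged to DD_USED_REFRSRV,
 * so compute the change in that quantity.
 */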
delta = MAX(0, (int64_t)(newval - unique)) -
MAX(0, (int64_t)(ds->ds_reserved - unique));
ds->ds_reserved = newval;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
mutex_exit(&ds->ds_dir->dd_lock);
}
static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_dataset_set_refreservation_sync_impl(ds,
ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t refreservation)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refreservation;
return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
dsl_dataset_set_refreservation_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
typedef struct dsl_dataset_set_compression_arg {
const char *ddsca_name;
zprop_source_t ddsca_source;
uint64_t ddsca_value;
} dsl_dataset_set_compression_arg_t;
static int
dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_compression_arg_t *ddsca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
spa_feature_t f = zio_compress_to_feature(compval);
if (f == SPA_FEATURE_NONE)
return (SET_ERROR(EINVAL));
if (!spa_feature_is_enabled(dp->dp_spa, f))
return (SET_ERROR(ENOTSUP));
return (0);
}
static void
dsl_dataset_set_compression_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_compression_arg_t *ddsca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
spa_feature_t f = zio_compress_to_feature(compval);
+ ASSERT3S(f, !=, SPA_FEATURE_NONE);
ASSERT3S(spa_feature_table[f].fi_type, ==, ZFEATURE_TYPE_BOOLEAN);
VERIFY0(dsl_dataset_hold(dp, ddsca->ddsca_name, FTAG, &ds));
if (zfeature_active(f, ds->ds_feature[f]) != B_TRUE) {
ds->ds_feature_activation[f] = (void *)B_TRUE;
dsl_dataset_activate_feature(ds->ds_object, f,
ds->ds_feature_activation[f], tx);
ds->ds_feature[f] = ds->ds_feature_activation[f];
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
uint64_t compression)
{
dsl_dataset_set_compression_arg_t ddsca;
/*
* The sync task is only required for zstd in order to activate
* the feature flag when the property is first set.
*/
if (ZIO_COMPRESS_ALGO(compression) != ZIO_COMPRESS_ZSTD)
return (0);
ddsca.ddsca_name = dsname;
ddsca.ddsca_source = source;
ddsca.ddsca_value = compression;
return (dsl_sync_task(dsname, dsl_dataset_set_compression_check,
dsl_dataset_set_compression_sync, &ddsca, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
/*
* Return (in *usedp) the amount of space referenced by "new" that was not
* referenced at the time the bookmark corresponds to. "New" may be a
* snapshot or a head. The bookmark must be before new, in
* new's filesystem (or its origin) -- caller verifies this.
*
* The written space is calculated by considering two components: First, we
* ignore any freed space, and calculate the written as new's used space
* minus old's used space. Next, we add in the amount of space that was freed
* between the two time points, thus reducing new's used space relative to
* old's. Specifically, this is the space that was born before
* zbm_creation_txg, and freed before new (ie. on new's deadlist or a
* previous deadlist).
*
* space freed [---------------------]
* snapshots ---O-------O--------O-------O------
* bookmark new
*
* Note, the bookmark's zbm_*_bytes_refd must be valid, but if the HAS_FBN
* flag is not set, we will calculate the freed_before_next based on the
* next snapshot's deadlist, rather than using zbm_*_freed_before_next_snap.
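*
* Example (illustrative numbers): if new references 10G, the bookmark
* recorded 6G referenced, and 2G of blocks born before the bookmark were
* later freed (and so appear on the intervening deadlists), then
* written = 10G - 6G + 2G = 6G.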
*/
static int
dsl_dataset_space_written_impl(zfs_bookmark_phys_t *bmp,
dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
dsl_pool_t *dp = new->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_is_snapshot(new)) {
ASSERT3U(bmp->zbm_creation_txg, <,
dsl_dataset_phys(new)->ds_creation_txg);
}
*usedp = 0;
*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
*usedp -= bmp->zbm_referenced_bytes_refd;
*compp = 0;
*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
*compp -= bmp->zbm_compressed_bytes_refd;
*uncompp = 0;
*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
*uncompp -= bmp->zbm_uncompressed_bytes_refd;
dsl_dataset_t *snap = new;
while (dsl_dataset_phys(snap)->ds_prev_snap_txg >
bmp->zbm_creation_txg) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds_deadlist,
0, bmp->zbm_creation_txg,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
uint64_t snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
if (snap != new)
dsl_dataset_rele(snap, FTAG);
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
if (err != 0)
break;
}
/*
* We might not have the FBN if we are calculating written from
* a snapshot (because we didn't know the correct "next" snapshot
* until now).
*/
if (bmp->zbm_flags & ZBM_FLAG_HAS_FBN) {
*usedp += bmp->zbm_referenced_freed_before_next_snap;
*compp += bmp->zbm_compressed_freed_before_next_snap;
*uncompp += bmp->zbm_uncompressed_freed_before_next_snap;
} else {
ASSERT3U(dsl_dataset_phys(snap)->ds_prev_snap_txg, ==,
bmp->zbm_creation_txg);
uint64_t used, comp, uncomp;
dsl_deadlist_space(&snap->ds_deadlist, &used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
}
if (snap != new)
dsl_dataset_rele(snap, FTAG);
return (err);
}
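To make the written-space arithmetic described above concrete, here is a minimal standalone sketch with made-up byte counts (plain C, no ZFS headers); it mirrors only the *usedp calculation and ignores the compressed/uncompressed counters.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* hypothetical values */
    uint64_t new_referenced = 12ULL << 30;    /* bytes referenced by "new" */
    uint64_t bmark_referenced = 10ULL << 30;  /* bytes referenced at the bookmark */
    uint64_t freed_in_between = 3ULL << 30;   /* born before the bookmark, freed before "new" */

    /*
     * written = new's referenced space - bookmark's referenced space
     *           + space freed between the bookmark and "new"
     */
    uint64_t written = new_referenced - bmark_referenced + freed_in_between;

    printf("written = %llu bytes\n", (unsigned long long)written);
    return (0);
}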
/*
* Return (in *usedp) the amount of space written in new that was not
* present at the time the bookmark corresponds to. New may be a
* snapshot or the head. Old must be a bookmark before new, in
* new's filesystem (or its origin) -- caller verifies this.
*/
int
dsl_dataset_space_written_bookmark(zfs_bookmark_phys_t *bmp,
dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
if (!(bmp->zbm_flags & ZBM_FLAG_HAS_FBN))
return (SET_ERROR(ENOTSUP));
return (dsl_dataset_space_written_impl(bmp, new,
usedp, compp, uncompp));
}
/*
* Return (in *usedp) the amount of space written in new that is not
* present in oldsnap. New may be a snapshot or the head. Old must be
* a snapshot before new, in new's filesystem (or its origin). If not then
* fail and return EINVAL.
*/
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
if (!dsl_dataset_is_before(new, oldsnap, 0))
return (SET_ERROR(EINVAL));
zfs_bookmark_phys_t zbm = { 0 };
dsl_dataset_phys_t *dsp = dsl_dataset_phys(oldsnap);
zbm.zbm_guid = dsp->ds_guid;
zbm.zbm_creation_txg = dsp->ds_creation_txg;
zbm.zbm_creation_time = dsp->ds_creation_time;
zbm.zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
zbm.zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
zbm.zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
/*
* If oldsnap is the origin (or origin's origin, ...) of new,
* we can't easily calculate the effective FBN. Therefore,
* we do not set ZBM_FLAG_HAS_FBN, so that the _impl will calculate
* it relative to the correct "next": the next snapshot towards "new",
* rather than the next snapshot in oldsnap's dsl_dir.
*/
return (dsl_dataset_space_written_impl(&zbm, new,
usedp, compp, uncompp));
}
/*
* Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
* lastsnap, and all snapshots in between are deleted.
*
* blocks that would be freed [----------------------------]
* snapshots          ---O-------O--------O-------O--------O
*                           firstsnap         lastsnap
*
* This is the set of blocks that were born after the snap before firstsnap,
* (birth > firstsnap->prev_snap_txg) and died before the snap after the
* last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
* We calculate this by iterating over the relevant deadlists (from the snap
* after lastsnap, backward to the snap after firstsnap), summing up the
* space on the deadlist that was born after the snap before firstsnap.
*/
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
dsl_dataset_t *lastsnap,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
uint64_t snapobj;
dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
ASSERT(firstsnap->ds_is_snapshot);
ASSERT(lastsnap->ds_is_snapshot);
/*
* Check that the snapshots are in the same dsl_dir, and firstsnap
* is before lastsnap.
*/
if (firstsnap->ds_dir != lastsnap->ds_dir ||
dsl_dataset_phys(firstsnap)->ds_creation_txg >
dsl_dataset_phys(lastsnap)->ds_creation_txg)
return (SET_ERROR(EINVAL));
*usedp = *compp = *uncompp = 0;
snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
while (snapobj != firstsnap->ds_object) {
dsl_dataset_t *ds;
uint64_t used, comp, uncomp;
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
if (err != 0)
break;
dsl_deadlist_space_range(&ds->ds_deadlist,
dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
ASSERT3U(snapobj, !=, 0);
dsl_dataset_rele(ds, FTAG);
}
return (err);
}
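As a worked illustration of the deadlist walk above, this standalone sketch (hypothetical numbers, plain C) sums the per-deadlist space born after the snapshot preceding firstsnap, one deadlist per snapshot from the one after lastsnap back to the one after firstsnap.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /*
     * Hypothetical per-deadlist byte counts, already restricted to the
     * "born after firstsnap's previous snapshot" range.
     */
    uint64_t deadlist_space[] = {
        512ULL << 20,    /* deadlist of the snapshot after lastsnap */
        128ULL << 20,
        2048ULL << 20,   /* deadlist of the snapshot after firstsnap */
    };
    uint64_t wouldfree = 0;

    for (size_t i = 0; i < sizeof (deadlist_space) / sizeof (deadlist_space[0]); i++)
        wouldfree += deadlist_space[i];

    printf("would free %llu bytes\n", (unsigned long long)wouldfree);
    return (0);
}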
/*
* Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
* For example, they could both be snapshots of the same filesystem, and
* 'earlier' is before 'later'. Or 'earlier' could be the origin of
* 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
* filesystem. Or 'earlier' could be the origin's origin.
*
* If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
*/
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg)
{
dsl_pool_t *dp = later->ds_dir->dd_pool;
int error;
boolean_t ret;
ASSERT(dsl_pool_config_held(dp));
ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
if (earlier_txg == 0)
earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;
if (later->ds_is_snapshot &&
earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
return (B_FALSE);
if (later->ds_dir == earlier->ds_dir)
return (B_TRUE);
/*
* We check dd_origin_obj explicitly here rather than using
* dsl_dir_is_clone() so that we will return TRUE if "earlier"
* is $ORIGIN@$ORIGIN. dsl_dataset_space_written() depends on
* this behavior.
*/
if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == 0)
return (B_FALSE);
dsl_dataset_t *origin;
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
if (error != 0)
return (B_FALSE);
if (dsl_dataset_phys(origin)->ds_creation_txg == earlier_txg &&
origin->ds_dir == earlier->ds_dir) {
dsl_dataset_rele(origin, FTAG);
return (B_TRUE);
}
ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
dsl_dataset_rele(origin, FTAG);
return (ret);
}
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}
boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
dmu_object_info_t doi;
dmu_object_info_from_db(ds->ds_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
return (dsl_dataset_is_zapified(ds) &&
zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}
uint64_t
dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
{
uint64_t remap_deadlist_obj;
int err;
if (!dsl_dataset_is_zapified(ds))
return (0);
err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
&remap_deadlist_obj);
if (err != 0) {
VERIFY3S(err, ==, ENOENT);
return (0);
}
ASSERT(remap_deadlist_obj != 0);
return (remap_deadlist_obj);
}
boolean_t
dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
{
EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
dsl_dataset_get_remap_deadlist_object(ds) != 0);
return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
}
static void
dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
ASSERT(obj != 0);
dsl_dataset_zapify(ds, tx);
VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
}
static void
dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
{
VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
}
void
dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t remap_deadlist_object;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dataset_remap_deadlist_exists(ds));
remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
dsl_deadlist_close(&ds->ds_remap_deadlist);
dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
dsl_dataset_unset_remap_deadlist_object(ds, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t remap_deadlist_obj;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
/*
* Currently we only create remap deadlists when there are indirect
* vdevs with referenced mappings.
*/
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
remap_deadlist_obj = dsl_deadlist_clone(
&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_dataset_set_remap_deadlist_object(ds,
remap_deadlist_obj, tx);
dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
remap_deadlist_obj);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
uint64_t num_redact_snaps, dmu_tx_t *tx)
{
uint64_t dsobj = ds->ds_object;
struct feature_type_uint64_array_arg *ftuaa =
kmem_zalloc(sizeof (*ftuaa), KM_SLEEP);
ftuaa->length = (int64_t)num_redact_snaps;
if (num_redact_snaps > 0) {
ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
KM_SLEEP);
memcpy(ftuaa->array, redact_snaps, num_redact_snaps *
sizeof (uint64_t));
}
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,
ftuaa, tx);
ds->ds_feature[SPA_FEATURE_REDACTED_DATASETS] = ftuaa;
}
/*
* Find and return (in *oldest_dsobj) the oldest snapshot of the dataset
* identified by head_ds whose birth time is >= min_txg.
*/
int
dsl_dataset_oldest_snapshot(spa_t *spa, uint64_t head_ds, uint64_t min_txg,
uint64_t *oldest_dsobj)
{
dsl_dataset_t *ds;
dsl_pool_t *dp = spa->spa_dsl_pool;
int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds);
if (error != 0)
return (error);
uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
while (prev_obj != 0 && min_txg < prev_obj_txg) {
dsl_dataset_rele(ds, FTAG);
if ((error = dsl_dataset_hold_obj(dp, prev_obj,
FTAG, &ds)) != 0)
return (error);
prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
}
*oldest_dsobj = ds->ds_object;
dsl_dataset_rele(ds, FTAG);
return (0);
}
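The loop above follows the prev-snapshot chain until stepping further back would go below min_txg; the standalone sketch below (hypothetical structures, plain C) performs the same walk over a simple linked list.

#include <stdint.h>
#include <stdio.h>

/* hypothetical snapshot record: birth txg plus a link to the previous snapshot */
struct snap {
    uint64_t txg;
    struct snap *prev;
};

/* return the oldest snapshot whose birth txg is still >= min_txg */
static struct snap *
oldest_at_or_after(struct snap *head, uint64_t min_txg)
{
    struct snap *s = head;

    while (s->prev != NULL && min_txg < s->prev->txg)
        s = s->prev;
    return (s);
}

int
main(void)
{
    struct snap s1 = { 100, NULL }, s2 = { 200, &s1 }, s3 = { 300, &s2 };

    /* s1 (txg 100) is older than min_txg, so the walk stops at s2 (txg 200) */
    printf("oldest >= 150: txg %llu\n",
        (unsigned long long)oldest_at_or_after(&s3, 150)->txg);
    return (0);
}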
ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, INT, ZMOD_RW,
"Max allowed record size");
ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
"Allow mounting of redacted datasets");
ZFS_MODULE_PARAM(zfs, zfs_, snapshot_history_enabled, INT, ZMOD_RW,
"Include snapshot events in pool history/events");
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_flags);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_rele_flags);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);
-EXPORT_SYMBOL(dsl_dataset_feature_set_activation);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dir.c b/sys/contrib/openzfs/module/zfs/dsl_dir.c
index 7460269384b4..a4db3ee2f306 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dir.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dir.c
@@ -1,2478 +1,2478 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2014 Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <sys/zthr.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
/*
* Filesystem and Snapshot Limits
* ------------------------------
*
* These limits are used to restrict the number of filesystems and/or snapshots
* that can be created at a given level in the tree or below. A typical
* use-case is with a delegated dataset where the administrator wants to ensure
* that a user within the zone is not creating too many additional filesystems
* or snapshots, even though they're not exceeding their space quota.
*
* The filesystem and snapshot counts are stored as extensible properties. This
* capability is controlled by a feature flag and must be enabled to be used.
* Once enabled, the feature is not active until the first limit is set. At
* that point, future operations to create/destroy filesystems or snapshots
* will validate and update the counts.
*
* Because the count properties will not exist before the feature is active,
* the counts are updated when a limit is first set on an uninitialized
* dsl_dir node in the tree (The filesystem/snapshot count on a node includes
* all of the nested filesystems/snapshots. Thus, a new leaf node has a
* filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
* snapshot count properties on a node indicate uninitialized counts on that
* node.) When first setting a limit on an uninitialized node, the code starts
* at the filesystem with the new limit and descends into all sub-filesystems
* to add the count properties.
*
* In practice this is lightweight since a limit is typically set when the
* filesystem is created and thus has no children. Once valid, changing the
* limit value won't require a re-traversal since the counts are already valid.
* When recursively fixing the counts, if a node with a limit is encountered
* during the descent, the counts are known to be valid and there is no need to
* descend into that filesystem's children. The counts on filesystems above the
* one with the new limit will still be uninitialized, unless a limit is
* eventually set on one of those filesystems. The counts are always recursively
* updated when a limit is set on a dataset, unless there is already a limit.
* When a new limit value is set on a filesystem with an existing limit, it is
* possible for the new limit to be less than the current count at that level
* since a user who can change the limit is also allowed to exceed the limit.
*
* Once the feature is active, then whenever a filesystem or snapshot is
* created, the code recurses up the tree, validating the new count against the
* limit at each initialized level. In practice, most levels will not have a
* limit set. If there is a limit at any initialized level up the tree, the
* check must pass or the creation will fail. Likewise, when a filesystem or
* snapshot is destroyed, the counts are recursively adjusted all the way up
* the initialized nodes in the tree. Renaming a filesystem into a different
* point in the tree will first validate, then update the counts on each branch up to
* the common ancestor. A receive will also validate the counts and then update
* them.
*
* An exception to the above behavior is that the limit is not enforced if the
* user has permission to modify the limit. This is primarily so that
* recursive snapshots in the global zone always work. We want to prevent a
* denial-of-service in which a lower level delegated dataset could max out its
* limit and thus block recursive snapshots from being taken in the global zone.
* Because of this, it is possible for the snapshot count to be over the limit
* and snapshots taken in the global zone could cause a lower level dataset to
* hit or exceed its limit. The administrator taking the global zone recursive
* snapshot should be aware of this side-effect and behave accordingly.
* For consistency, the filesystem limit is also not enforced if the user can
* modify the limit.
*
* The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
* and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in
* dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
* dsl_dir_init_fs_ss_count().
*/
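To picture the upward validation described above, the following standalone sketch (hypothetical types, plain C, no ZFS headers) checks a proposed count increase against the limit at every node from a directory up to the root; nodes with no limit set simply defer to their parent.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NO_LIMIT    UINT64_MAX    /* hypothetical "no limit set" marker */

struct dir {
    struct dir *parent;
    uint64_t fs_count;    /* filesystems at or below this node */
    uint64_t fs_limit;    /* NO_LIMIT when unlimited/uninitialized */
};

/* would creating "delta" more filesystems under d exceed a limit anywhere above? */
static int
fs_limit_check(struct dir *d, uint64_t delta)
{
    for (; d != NULL; d = d->parent) {
        if (d->fs_limit != NO_LIMIT && d->fs_count + delta > d->fs_limit)
            return (EDQUOT);
    }
    return (0);
}

int
main(void)
{
    struct dir pool = { NULL, 5, NO_LIMIT };
    struct dir tenant = { &pool, 3, 3 };    /* already at its limit of 3 */

    printf("check: %d\n", fs_limit_check(&tenant, 1));    /* EDQUOT */
    return (0);
}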
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
typedef struct ddulrt_arg {
dsl_dir_t *ddulrta_dd;
uint64_t ddlrta_txg;
} ddulrt_arg_t;
static void
dsl_dir_evict_async(void *dbu)
{
dsl_dir_t *dd = dbu;
int t;
dsl_pool_t *dp __maybe_unused = dd->dd_pool;
dd->dd_dbuf = NULL;
for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
ASSERT(dd->dd_tempreserved[t] == 0);
ASSERT(dd->dd_space_towrite[t] == 0);
}
if (dd->dd_parent)
dsl_dir_async_rele(dd->dd_parent, dd);
spa_async_close(dd->dd_pool->dp_spa, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
}
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, const void *tag, dsl_dir_t **ddp)
{
dmu_buf_t *dbuf;
dsl_dir_t *dd;
dmu_object_info_t doi;
int err;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
if (err != 0)
return (err);
dd = dmu_buf_get_user(dbuf);
dmu_object_info_from_db(dbuf, &doi);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
if (dd == NULL) {
dsl_dir_t *winner;
dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
dd->dd_object = ddobj;
dd->dd_dbuf = dbuf;
dd->dd_pool = dp;
mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
dsl_prop_init(dd);
if (dsl_dir_is_zapified(dd)) {
err = zap_lookup(dp->dp_meta_objset,
ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
sizeof (uint64_t), 1, &dd->dd_crypto_obj);
if (err == 0) {
/* check for on-disk format errata */
if (dsl_dir_incompatible_encryption_version(
dd)) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
}
} else if (err != ENOENT) {
goto errout;
}
}
if (dsl_dir_phys(dd)->dd_parent_obj) {
err = dsl_dir_hold_obj(dp,
dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
&dd->dd_parent);
if (err != 0)
goto errout;
if (tail) {
#ifdef ZFS_DEBUG
uint64_t foundobj;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj, tail,
sizeof (foundobj), 1, &foundobj);
ASSERT(err || foundobj == ddobj);
#endif
(void) strlcpy(dd->dd_myname, tail,
sizeof (dd->dd_myname));
} else {
err = zap_value_search(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj,
ddobj, 0, dd->dd_myname);
}
if (err != 0)
goto errout;
} else {
(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
sizeof (dd->dd_myname));
}
if (dsl_dir_is_clone(dd)) {
dmu_buf_t *origin_bonus;
dsl_dataset_phys_t *origin_phys;
/*
* We can't open the origin dataset, because
* that would require opening this dsl_dir.
* Just look at its phys directly instead.
*/
err = dmu_bonus_hold(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_origin_obj, FTAG,
&origin_bonus);
if (err != 0)
goto errout;
origin_phys = origin_bonus->db_data;
dd->dd_origin_txg =
origin_phys->ds_creation_txg;
dmu_buf_rele(origin_bonus, FTAG);
if (dsl_dir_is_zapified(dd)) {
uint64_t obj;
err = zap_lookup(dp->dp_meta_objset,
dd->dd_object, DD_FIELD_LIVELIST,
sizeof (uint64_t), 1, &obj);
if (err == 0)
dsl_dir_livelist_open(dd, obj);
else if (err != ENOENT)
goto errout;
}
}
if (dsl_dir_is_zapified(dd)) {
inode_timespec_t t = {0};
zap_lookup(dp->dp_meta_objset, ddobj,
- zfs_prop_to_name(ZFS_PROP_SNAPSHOTS_CHANGED),
+ DD_FIELD_SNAPSHOTS_CHANGED,
sizeof (uint64_t),
sizeof (inode_timespec_t) / sizeof (uint64_t),
&t);
dd->dd_snap_cmtime = t;
}
dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
&dd->dd_dbuf);
winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
if (winner != NULL) {
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dd = winner;
} else {
spa_open_ref(dp->dp_spa, dd);
}
}
/*
* The dsl_dir_t has both open-to-close and instantiate-to-evict
* holds on the spa. We need the open-to-close holds because
* otherwise the spa_refcnt wouldn't change when we open a
* dir which the spa also has open, so we could incorrectly
* think it was OK to unload/export/destroy the pool. We need
* the instantiate-to-evict hold because the dsl_dir_t has a
* pointer to the dd_pool, which has a pointer to the spa_t.
*/
spa_open_ref(dp->dp_spa, tag);
ASSERT3P(dd->dd_pool, ==, dp);
ASSERT3U(dd->dd_object, ==, ddobj);
ASSERT3P(dd->dd_dbuf, ==, dbuf);
*ddp = dd;
return (0);
errout:
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
void
dsl_dir_rele(dsl_dir_t *dd, const void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/*
* Remove a reference to the given dsl dir that is being asynchronously
* released. Async releases occur from a taskq performing eviction of
* dsl datasets and dirs. This process is identical to a normal release
* with the exception of using the async API for releasing the reference on
* the spa.
*/
void
dsl_dir_async_rele(dsl_dir_t *dd, const void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_async_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
if (dd->dd_parent) {
dsl_dir_name(dd->dd_parent, buf);
VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
} else {
buf[0] = '\0';
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/*
* recursive mutex so that we can use
* dprintf_dd() with dd_lock held
*/
mutex_enter(&dd->dd_lock);
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&dd->dd_lock);
} else {
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
int result = 0;
if (dd->dd_parent) {
/* parent's name + 1 for the "/" */
result = dsl_dir_namelen(dd->dd_parent) + 1;
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/* see dsl_dir_name */
mutex_enter(&dd->dd_lock);
result += strlen(dd->dd_myname);
mutex_exit(&dd->dd_lock);
} else {
result += strlen(dd->dd_myname);
}
return (result);
}
static int
getcomponent(const char *path, char *component, const char **nextp)
{
char *p;
if ((path == NULL) || (path[0] == '\0'))
return (SET_ERROR(ENOENT));
/* This would be a good place to reserve some namespace... */
p = strpbrk(path, "/@");
if (p && (p[1] == '/' || p[1] == '@')) {
/* two separators in a row */
return (SET_ERROR(EINVAL));
}
if (p == NULL || p == path) {
/*
* if the first thing is an @ or /, it had better be an
* @ and it had better not have any more ats or slashes,
* and it had better have something after the @.
*/
if (p != NULL &&
(p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
return (SET_ERROR(EINVAL));
if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
p++;
} else if (p[0] == '@') {
/*
* if the next separator is an @, there better not be
* any more slashes.
*/
if (strchr(path, '/'))
return (SET_ERROR(EINVAL));
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
} else {
panic("invalid p=%p", (void *)p);
}
*nextp = p;
return (0);
}
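For a concrete feel of the splitting that getcomponent() performs, the simplified standalone sketch below (plain C, no error checking, and without getcomponent()'s special-casing of the '@' component) breaks a name such as tank/home/user@snap at each '/' or '@' using the same strpbrk() call.

#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *path = "tank/home/user@snap";

    while (path != NULL && path[0] != '\0') {
        const char *sep = strpbrk(path, "/@");

        if (sep == NULL) {
            printf("component: %s\n", path);
            break;
        }
        printf("component: %.*s (next separator '%c')\n",
            (int)(sep - path), path, *sep);
        path = sep + 1;
    }
    return (0);
}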
/*
* Return the dsl_dir_t, and possibly the last component which couldn't
* be found in *tail. The name must be in the specified dsl_pool_t. This
* thread must hold the dp_config_rwlock for the pool. Returns NULL if the
* path is bogus, or if tail==NULL and we couldn't parse the whole name.
* (*tail)[0] == '@' means that the last component is a snapshot.
*/
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dir_t **ddp, const char **tailp)
{
char *buf;
const char *spaname, *next, *nextnext = NULL;
int err;
dsl_dir_t *dd;
uint64_t ddobj;
buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
err = getcomponent(name, buf, &next);
if (err != 0)
goto error;
/* Make sure the name is in the specified pool. */
spaname = spa_name(dp->dp_spa);
if (strcmp(buf, spaname) != 0) {
err = SET_ERROR(EXDEV);
goto error;
}
ASSERT(dsl_pool_config_held(dp));
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
if (err != 0) {
goto error;
}
while (next != NULL) {
dsl_dir_t *child_dd;
err = getcomponent(next, buf, &nextnext);
if (err != 0)
break;
ASSERT(next[0] != '\0');
if (next[0] == '@')
break;
dprintf("looking up %s in obj%lld\n",
buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj,
buf, sizeof (ddobj), 1, &ddobj);
if (err != 0) {
if (err == ENOENT)
err = 0;
break;
}
err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
if (err != 0)
break;
dsl_dir_rele(dd, tag);
dd = child_dd;
next = nextnext;
}
if (err != 0) {
dsl_dir_rele(dd, tag);
goto error;
}
/*
* It's an error if there's more than one component left, or
* tailp==NULL and there's any component left.
*/
if (next != NULL &&
(tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
/* bad path name */
dsl_dir_rele(dd, tag);
dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
err = SET_ERROR(ENOENT);
}
if (tailp != NULL)
*tailp = next;
if (err == 0)
*ddp = dd;
error:
kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
return (err);
}
/*
* If the counts are already initialized for this filesystem and its
* descendants then do nothing, otherwise initialize the counts.
*
* The counts on this filesystem, and those below, may be uninitialized due to
* either the use of a pre-existing pool which did not support the
* filesystem/snapshot limit feature, or one in which the feature had not yet
* been enabled.
*
* Recursively descend the filesystem tree and update the filesystem/snapshot
* counts on each filesystem below, then update the cumulative count on the
* current filesystem. If the filesystem already has a count set on it,
* then we know that its counts, and the counts on the filesystems below it,
* are already correct, so we don't have to update this filesystem.
*/
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
uint64_t my_fs_cnt = 0;
uint64_t my_ss_cnt = 0;
dsl_pool_t *dp = dd->dd_pool;
objset_t *os = dp->dp_meta_objset;
zap_cursor_t *zc;
zap_attribute_t *za;
dsl_dataset_t *ds;
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
ASSERT(dsl_pool_config_held(dp));
ASSERT(dmu_tx_is_syncing(tx));
dsl_dir_zapify(dd, tx);
/*
* If the filesystem count has already been initialized then we
* don't need to recurse down any further.
*/
if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
return;
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* Iterate my child dirs */
for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
dsl_dir_t *chld_dd;
uint64_t count;
VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
&chld_dd));
/*
* Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
*/
if (chld_dd->dd_myname[0] == '$') {
dsl_dir_rele(chld_dd, FTAG);
continue;
}
my_fs_cnt++; /* count this child */
dsl_dir_init_fs_ss_count(chld_dd, tx);
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
my_fs_cnt += count;
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
my_ss_cnt += count;
dsl_dir_rele(chld_dd, FTAG);
}
zap_cursor_fini(zc);
/* Count my snapshots (we counted children's snapshots above) */
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
/* Don't count temporary snapshots */
if (za->za_name[0] != '%')
my_ss_cnt++;
}
zap_cursor_fini(zc);
dsl_dataset_rele(ds, FTAG);
kmem_free(zc, sizeof (zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
/* we're in a sync task, update counts */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}
static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
int error;
error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
if (error != 0)
return (error);
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dd = ds->ds_dir;
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
dsl_dir_is_zapified(dd) &&
zap_contains(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EALREADY));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
spa_t *spa;
VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
spa = dsl_dataset_get_spa(ds);
if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Since the feature was not active and we're now setting a
* limit, increment the feature-active counter so that the
* feature becomes active for the first time.
*
* We are already in a sync task so we can update the MOS.
*/
spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
}
/*
* Since we are now setting a non-UINT64_MAX limit on the filesystem,
* we need to ensure the counts are correct. Descend down the tree from
* this point and update all of the counts to be accurate.
*/
dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* Make sure the feature is enabled and activate it if necessary.
* Since we're setting a limit, ensure the on-disk counts are valid.
* This is only called by the ioctl path when setting a limit value.
*
* We do not need to validate the new limit, since users who can change the
* limit are also allowed to exceed the limit.
*/
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
int error;
error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
ZFS_SPACE_CHECK_RESERVED);
if (error == EALREADY)
error = 0;
return (error);
}
/*
* Used to determine if the filesystem_limit or snapshot_limit should be
* enforced. We allow the limit to be exceeded if the user has permission to
* write the property value. We pass in the creds that we got in the open
* context since we will always be the GZ root in syncing context. We also have
* to handle the case where we are allowed to change the limit on the current
* dataset, but there may be another limit in the tree above.
*
* We can never modify these two properties within a non-global zone. In
* addition, the other checks are modeled on zfs_secpolicy_write_perms. We
* can't use that function since we are already holding the dp_config_rwlock.
* In addition, we already have the dd and dealing with snapshots is simplified
* in this code.
*/
typedef enum {
ENFORCE_ALWAYS,
ENFORCE_NEVER,
ENFORCE_ABOVE
} enforce_res_t;
static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
cred_t *cr, proc_t *proc)
{
enforce_res_t enforce = ENFORCE_ALWAYS;
uint64_t obj;
dsl_dataset_t *ds;
uint64_t zoned;
const char *zonedstr;
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
#ifdef _KERNEL
if (crgetzoneid(cr) != GLOBAL_ZONEID)
return (ENFORCE_ALWAYS);
/*
* We are checking the saved credentials of the user process, which is
* not the current process. Note that we can't use secpolicy_zfs(),
* because it only works if the cred is that of the current process (on
* Linux).
*/
if (secpolicy_zfs_proc(cr, proc) == 0)
return (ENFORCE_NEVER);
#else
(void) proc;
#endif
if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
return (ENFORCE_ALWAYS);
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
return (ENFORCE_ALWAYS);
zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
/* Only root can access zoned fs's from the GZ */
enforce = ENFORCE_ALWAYS;
} else {
if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
enforce = ENFORCE_ABOVE;
}
dsl_dataset_rele(ds, FTAG);
return (enforce);
}
/*
* Check if adding additional child filesystem(s) would exceed any filesystem
* limits or adding additional snapshot(s) would exceed any snapshot limits.
* The prop argument indicates which limit to check.
*
* Note that all filesystem limits up to the root (or the highest
* initialized) filesystem or the given ancestor must be satisfied.
*/
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
dsl_dir_t *ancestor, cred_t *cr, proc_t *proc)
{
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t limit, count;
const char *count_prop;
enforce_res_t enforce;
int err = 0;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
/*
* If we're allowed to change the limit, don't enforce the limit
* e.g. this can happen if a snapshot is taken by an administrative
* user in the global zone (i.e. a recursive snapshot by root).
* However, we must handle the case of delegated permissions where we
* are allowed to change the limit on the current dataset, but there
* is another limit in the tree above.
*/
enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc);
if (enforce == ENFORCE_NEVER)
return (0);
/*
* e.g. if renaming a dataset with no snapshots, count adjustment
* is 0.
*/
if (delta == 0)
return (0);
if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
/*
* We don't enforce the limit for temporary snapshots. This is
* indicated by a NULL cred_t argument.
*/
if (cr == NULL)
return (0);
count_prop = DD_FIELD_SNAPSHOT_COUNT;
} else {
count_prop = DD_FIELD_FILESYSTEM_COUNT;
}
/*
* If an ancestor has been provided, stop checking the limit once we
* hit that dir. We need this during rename so that we don't overcount
* the check once we recurse up to the common ancestor.
*/
if (ancestor == dd)
return (0);
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know there is no limit here (or above). The counts are
* not valid on this node and we know we won't touch this node's counts.
*/
if (!dsl_dir_is_zapified(dd))
return (0);
err = zap_lookup(os, dd->dd_object,
count_prop, sizeof (count), 1, &count);
if (err == ENOENT)
return (0);
if (err != 0)
return (err);
err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
B_FALSE);
if (err != 0)
return (err);
/* Is there a limit which we've hit? */
if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
return (SET_ERROR(EDQUOT));
if (dd->dd_parent != NULL)
err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
ancestor, cr, proc);
return (err);
}
/*
* Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
* parents. When a new filesystem/snapshot is created, increment the count on
* all parents, and when a filesystem/snapshot is destroyed, decrement the
* count.
*/
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
dmu_tx_t *tx)
{
int err;
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t count;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
/*
* We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
*/
if (dd->dd_myname[0] == '$' && strcmp(prop,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
return;
}
/*
* e.g. if renaming a dataset with no snapshots, count adjustment is 0
*/
if (delta == 0)
return;
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know the counts are not valid on this node and we
* know we shouldn't touch this node's counts. An uninitialized count
* on the node indicates that either the feature has not yet been
* activated or there are no limits on this part of the tree.
*/
if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
prop, sizeof (count), 1, &count)) == ENOENT)
return;
VERIFY0(err);
count += delta;
/* Use a signed verify to make sure we're not neg. */
VERIFY3S(count, >=, 0);
VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
tx));
/* Roll up this additional count into our ancestors */
if (dd->dd_parent != NULL)
dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t ddobj;
dsl_dir_phys_t *ddphys;
dmu_buf_t *dbuf;
ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
if (pds) {
VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
name, sizeof (uint64_t), 1, &ddobj, tx));
} else {
/* it's the root dir */
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
}
VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
ddphys = dbuf->db_data;
ddphys->dd_creation_time = gethrestime_sec();
if (pds) {
ddphys->dd_parent_obj = pds->dd_object;
/* update the filesystem counts */
dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
}
ddphys->dd_props_zapobj = zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
ddphys->dd_child_dir_zapobj = zap_create(mos,
DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
dmu_buf_rele(dbuf, FTAG);
return (ddobj);
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_origin_obj &&
(dd->dd_pool->dp_origin_snap == NULL ||
dsl_dir_phys(dd)->dd_origin_obj !=
dd->dd_pool->dp_origin_snap->ds_object));
}
uint64_t
dsl_dir_get_used(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_bytes);
}
uint64_t
dsl_dir_get_compressed(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_compressed_bytes);
}
uint64_t
dsl_dir_get_quota(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_quota);
}
uint64_t
dsl_dir_get_reservation(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_reserved);
}
uint64_t
dsl_dir_get_compressratio(dsl_dir_t *dd)
{
/* a fixed point number, 100x the ratio */
return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
(dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
dsl_dir_phys(dd)->dd_compressed_bytes));
}
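A quick worked example of the 100x fixed-point ratio returned above, using hypothetical byte counts (plain C):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t uncompressed = 25ULL << 30;    /* hypothetical logical bytes */
    uint64_t compressed = 10ULL << 30;      /* hypothetical physical bytes */

    /* 100x fixed point, as in dsl_dir_get_compressratio() */
    uint64_t ratio = compressed == 0 ? 100 :
        uncompressed * 100 / compressed;

    printf("compressratio = %llu (i.e. %.2fx)\n",
        (unsigned long long)ratio, (double)ratio / 100.0);
    return (0);
}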
uint64_t
dsl_dir_get_logicalused(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
}
uint64_t
dsl_dir_get_usedsnap(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
}
uint64_t
dsl_dir_get_usedds(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
}
uint64_t
dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
}
uint64_t
dsl_dir_get_usedchild(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
}
void
dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
{
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (*count), 1, count));
} else {
return (SET_ERROR(ENOENT));
}
}
int
dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (*count), 1, count));
} else {
return (SET_ERROR(ENOENT));
}
}
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
mutex_enter(&dd->dd_lock);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
dsl_dir_get_quota(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
dsl_dir_get_reservation(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
dsl_dir_get_logicalused(dd));
if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
dsl_dir_get_usedsnap(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
dsl_dir_get_usedds(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
dsl_dir_get_usedrefreserv(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
dsl_dir_get_usedchild(dd));
}
mutex_exit(&dd->dd_lock);
uint64_t count;
if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
count);
}
if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
count);
}
if (dsl_dir_is_clone(dd)) {
char buf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dir_get_origin(dd, buf);
dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
}
}
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(dsl_dir_phys(dd));
if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(dd->dd_dbuf, dd);
}
}
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
uint64_t new_accounted =
MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
return (new_accounted - old_accounted);
}
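parent_delta() charges the parent only for growth beyond what the reservation already accounts for; the standalone sketch below (hypothetical unit counts, plain C) replays that arithmetic.

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b)    ((a) > (b) ? (a) : (b))

/* same arithmetic as parent_delta(): change in the amount charged to the parent */
static int64_t
parent_delta_example(uint64_t used, uint64_t reserved, int64_t delta)
{
    uint64_t old_accounted = MAX(used, reserved);
    uint64_t new_accounted = MAX(used + delta, reserved);

    return (new_accounted - old_accounted);
}

int
main(void)
{
    /*
     * With 10 units used and a 20-unit reservation, 5 more units are still
     * covered by the reservation, so the parent sees no change; 15 more
     * units push past it, so the parent absorbs the 5-unit overage.
     */
    printf("%lld\n", (long long)parent_delta_example(10, 20, 5));    /* 0 */
    printf("%lld\n", (long long)parent_delta_example(10, 20, 15));   /* 5 */
    return (0);
}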
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
mutex_enter(&dd->dd_lock);
ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
(u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
mutex_exit(&dd->dd_lock);
/* release the hold from dsl_dir_dirty */
dmu_buf_rele(dd->dd_dbuf, dd);
}
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
uint64_t space = 0;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (int i = 0; i < TXG_SIZE; i++) {
space += dd->dd_space_towrite[i & TXG_MASK];
ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0);
}
return (space);
}
/*
* How much space would dd have available if ancestor had delta applied
* to it? If ondiskonly is set, we're only interested in what's
* on-disk, not estimated pending changes.
*/
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
uint64_t parentspace, myspace, quota, used;
/*
* If there are no restrictions otherwise, assume we have
* unlimited space available.
*/
quota = UINT64_MAX;
parentspace = UINT64_MAX;
if (dd->dd_parent != NULL) {
parentspace = dsl_dir_space_available(dd->dd_parent,
ancestor, delta, ondiskonly);
}
mutex_enter(&dd->dd_lock);
if (dsl_dir_phys(dd)->dd_quota != 0)
quota = dsl_dir_phys(dd)->dd_quota;
used = dsl_dir_phys(dd)->dd_used_bytes;
if (!ondiskonly)
used += dsl_dir_space_towrite(dd);
if (dd->dd_parent == NULL) {
uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
ZFS_SPACE_CHECK_NORMAL);
quota = MIN(quota, poolsize);
}
if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
/*
* We have some space reserved, in addition to what our
* parent gave us.
*/
parentspace += dsl_dir_phys(dd)->dd_reserved - used;
}
if (dd == ancestor) {
ASSERT(delta <= 0);
ASSERT(used >= -delta);
used += delta;
if (parentspace != UINT64_MAX)
parentspace -= delta;
}
if (used > quota) {
/* over quota */
myspace = 0;
} else {
/*
* the lesser of the space provided by our parent and
* the space left in our quota
*/
myspace = MIN(parentspace, quota - used);
}
mutex_exit(&dd->dd_lock);
return (myspace);
}
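The final step above reduces to "the lesser of what the parent can still give us and what remains under our own quota"; a tiny numeric sketch with hypothetical values follows (plain C).

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)    ((a) < (b) ? (a) : (b))

int
main(void)
{
    uint64_t quota = 100;          /* hypothetical quota on this dsl_dir */
    uint64_t used = 70;            /* space already charged to it */
    uint64_t parentspace = 40;     /* what the parent says is still available */

    uint64_t myspace = used > quota ? 0 : MIN(parentspace, quota - used);

    printf("available = %llu\n", (unsigned long long)myspace);    /* 30 */
    return (0);
}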
struct tempreserve {
list_node_t tr_node;
dsl_dir_t *tr_ds;
uint64_t tr_size;
};
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
boolean_t ignorequota, list_t *tr_list,
dmu_tx_t *tx, boolean_t first)
{
uint64_t txg;
uint64_t quota;
struct tempreserve *tr;
int retval;
uint64_t ref_rsrv;
top_of_function:
txg = tx->tx_txg;
retval = EDQUOT;
ref_rsrv = 0;
ASSERT3U(txg, !=, 0);
ASSERT3S(asize, >, 0);
mutex_enter(&dd->dd_lock);
/*
* Check against the dsl_dir's quota. We don't add in the delta
* when checking for over-quota because they get one free hit.
*/
uint64_t est_inflight = dsl_dir_space_towrite(dd);
for (int i = 0; i < TXG_SIZE; i++)
est_inflight += dd->dd_tempreserved[i];
uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
/*
* On the first iteration, fetch the dataset's used-on-disk and
* refreservation values. Also, if checkrefquota is set, test if
* allocating this space would exceed the dataset's refquota.
*/
if (first && tx->tx_objset) {
int error;
dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
error = dsl_dataset_check_quota(ds, !netfree,
asize, est_inflight, &used_on_disk, &ref_rsrv);
if (error != 0) {
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (error);
}
}
/*
* If this transaction will result in a net free of space,
* we want to let it through.
*/
if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
quota = UINT64_MAX;
else
quota = dsl_dir_phys(dd)->dd_quota;
/*
* Adjust the quota against the actual pool size at the root
* minus any outstanding deferred frees.
* To ensure that it's possible to remove files from a full
* pool without inducing transient overcommits, we throttle
* netfree transactions against a quota that is slightly larger,
* but still within the pool's allocation slop. In cases where
* we're very close to full, this will allow a steady trickle of
* removes to get through.
*/
if (dd->dd_parent == NULL) {
uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
(netfree) ?
ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);
if (avail < quota) {
quota = avail;
retval = SET_ERROR(ENOSPC);
}
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes
* or deferred frees (which may free up space for us).
*/
if (used_on_disk + est_inflight >= quota) {
if (est_inflight > 0 || used_on_disk < quota) {
retval = SET_ERROR(ERESTART);
} else {
ASSERT3U(used_on_disk, >=, quota);
if (retval == ENOSPC && (used_on_disk - quota) <
dsl_pool_deferred_space(dd->dd_pool)) {
retval = SET_ERROR(ERESTART);
}
}
dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
"quota=%lluK tr=%lluK err=%d\n",
(u_longlong_t)used_on_disk>>10,
(u_longlong_t)est_inflight>>10,
(u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (retval);
}
/* We need to up our estimated delta before dropping dd_lock */
dd->dd_tempreserved[txg & TXG_MASK] += asize;
uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
asize - ref_rsrv);
mutex_exit(&dd->dd_lock);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_ds = dd;
tr->tr_size = asize;
list_insert_tail(tr_list, tr);
/* see if it's OK with our parent */
if (dd->dd_parent != NULL && parent_rsrv != 0) {
/*
* Recurse on our parent without recursion. This has been
* observed to be potentially large stack usage even within
* the test suite. Largest seen stack was 7632 bytes on linux.
*/
dd = dd->dd_parent;
asize = parent_rsrv;
ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
first = B_FALSE;
goto top_of_function;
} else {
return (0);
}
}
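The over-quota branch above distinguishes a retryable condition from a hard failure; this simplified standalone sketch (hypothetical values, plain C, ignoring the deferred-free and pool-level ENOSPC refinements) reproduces the core decision.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* simplified version of the over-quota decision in dsl_dir_tempreserve_impl() */
static int
over_quota_result(uint64_t used_on_disk, uint64_t est_inflight, uint64_t quota)
{
    if (used_on_disk + est_inflight < quota)
        return (0);          /* fits, no error */
    if (est_inflight > 0 || used_on_disk < quota)
        return (ERESTART);   /* estimates may still improve, ask caller to retry */
    return (EDQUOT);         /* genuinely over quota on disk */
}

int
main(void)
{
    printf("%d\n", over_quota_result(90, 0, 100));     /* 0 */
    printf("%d\n", over_quota_result(90, 20, 100));    /* ERESTART */
    printf("%d\n", over_quota_result(120, 0, 100));    /* EDQUOT */
    return (0);
}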
/*
* Reserve space in this dsl_dir, to be used in this tx's txg.
* After the space has been dirtied (and dsl_dir_willuse_space()
* has been called), the reservation should be canceled, using
* dsl_dir_tempreserve_clear().
*/
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
{
int err;
list_t *tr_list;
if (asize == 0) {
*tr_cookiep = NULL;
return (0);
}
tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
list_create(tr_list, sizeof (struct tempreserve),
offsetof(struct tempreserve, tr_node));
ASSERT3S(asize, >, 0);
err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
if (err == 0) {
struct tempreserve *tr;
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_size = lsize;
list_insert_tail(tr_list, tr);
} else {
if (err == EAGAIN) {
/*
* If arc_memory_throttle() detected that pageout
* is running and we are low on memory, we delay new
* non-pageout transactions to give pageout an
* advantage.
*
* It is unfortunate to be delaying while the caller's
* locks are held.
*/
txg_delay(dd->dd_pool, tx->tx_txg,
MSEC2NSEC(10), MSEC2NSEC(10));
err = SET_ERROR(ERESTART);
}
}
if (err == 0) {
err = dsl_dir_tempreserve_impl(dd, asize, netfree,
B_FALSE, tr_list, tx, B_TRUE);
}
if (err != 0)
dsl_dir_tempreserve_clear(tr_list, tx);
else
*tr_cookiep = tr_list;
return (err);
}
/*
* Clear a temporary reservation that we previously made with
* dsl_dir_tempreserve_space().
*/
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
int txgidx = tx->tx_txg & TXG_MASK;
list_t *tr_list = tr_cookie;
struct tempreserve *tr;
ASSERT3U(tx->tx_txg, !=, 0);
if (tr_cookie == NULL)
return;
while ((tr = list_head(tr_list)) != NULL) {
if (tr->tr_ds) {
mutex_enter(&tr->tr_ds->dd_lock);
ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
tr->tr_size);
tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
mutex_exit(&tr->tr_ds->dd_lock);
} else {
arc_tempreserve_clear(tr->tr_size);
}
list_remove(tr_list, tr);
kmem_free(tr, sizeof (struct tempreserve));
}
kmem_free(tr_list, sizeof (list_t));
}
/*
* This should be called from open context when we think we're going to write
* or free space, for example when dirtying data. Be conservative; it's okay
* to write less space or free more, but we don't want to write more or free
* less than the amount specified.
*
* NOTE: The behavior of this function is identical to the Illumos / FreeBSD
* version; however, it has been adjusted to use an iterative rather than a
* recursive algorithm to minimize stack usage.
*/
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
int64_t parent_space;
uint64_t est_used;
do {
mutex_enter(&dd->dd_lock);
if (space > 0)
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
est_used = dsl_dir_space_towrite(dd) +
dsl_dir_phys(dd)->dd_used_bytes;
parent_space = parent_delta(dd, est_used, space);
mutex_exit(&dd->dd_lock);
/* Make sure that we clean up dd_space_to* */
dsl_dir_dirty(dd, tx);
dd = dd->dd_parent;
space = parent_space;
} while (space && dd);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
int64_t accounted_delta;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(type < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/*
* dsl_dataset_set_refreservation_sync_impl() calls this with
* dd_lock held, so that it can atomically update
* ds->ds_reserved and the dsl_dir accounting, so that
* dsl_dataset_check_quota() can see dataset and dir accounting
* consistently.
*/
boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
if (needlock)
mutex_enter(&dd->dd_lock);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
ddp->dd_uncompressed_bytes >= -uncompressed);
ddp->dd_used_bytes += used;
ddp->dd_uncompressed_bytes += uncompressed;
ddp->dd_compressed_bytes += compressed;
if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
ddp->dd_used_breakdown[type] += used;
#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += ddp->dd_used_breakdown[t];
ASSERT3U(u, ==, ddp->dd_used_bytes);
}
#endif
}
if (needlock)
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_transfer_space(dd->dd_parent,
accounted_delta, compressed, uncompressed,
used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
if (delta == 0 ||
!(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
return;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
ASSERT(delta > 0 ?
ddp->dd_used_breakdown[oldtype] >= delta :
ddp->dd_used_breakdown[newtype] >= -delta);
ASSERT(ddp->dd_used_bytes >= ABS(delta));
ddp->dd_used_breakdown[oldtype] -= delta;
ddp->dd_used_breakdown[newtype] += delta;
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
int64_t compressed, int64_t uncompressed, int64_t tonew,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
int64_t accounted_delta;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
ddp->dd_uncompressed_bytes >= -uncompressed);
ddp->dd_used_bytes += used;
ddp->dd_uncompressed_bytes += uncompressed;
ddp->dd_compressed_bytes += compressed;
if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(tonew - used <= 0 ||
ddp->dd_used_breakdown[oldtype] >= tonew - used);
ASSERT(tonew >= 0 ||
ddp->dd_used_breakdown[newtype] >= -tonew);
ddp->dd_used_breakdown[oldtype] -= tonew - used;
ddp->dd_used_breakdown[newtype] += tonew;
#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += ddp->dd_used_breakdown[t];
ASSERT3U(u, ==, ddp->dd_used_bytes);
}
#endif
}
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_transfer_space(dd->dd_parent,
accounted_delta, compressed, uncompressed,
used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
typedef struct dsl_dir_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t towrite, newval;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
error = dsl_prop_predict(ds->ds_dir, "quota",
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_dir->dd_lock);
/*
* If we are doing the preliminary check in open context, and
* there are pending changes, then don't fail it, since the
* pending changes could under-estimate the amount of space to be
* freed up.
*/
towrite = dsl_dir_space_towrite(ds->ds_dir);
if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
(newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
error = SET_ERROR(ENOSPC);
}
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
return (error);
}
static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
}
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = quota;
return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
dsl_dir_set_quota_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
uint64_t newval, used, avail;
int error;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
dd = ds->ds_dir;
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
mutex_exit(&dd->dd_lock);
if (dd->dd_parent) {
avail = dsl_dir_space_available(dd->dd_parent,
NULL, 0, FALSE);
} else {
avail = dsl_pool_adjustedsize(dd->dd_pool,
ZFS_SPACE_CHECK_NORMAL) - used;
}
if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
uint64_t delta = MAX(used, newval) -
MAX(used, dsl_dir_phys(dd)->dd_reserved);
if (delta > avail ||
(dsl_dir_phys(dd)->dd_quota > 0 &&
newval > dsl_dir_phys(dd)->dd_quota))
error = SET_ERROR(ENOSPC);
}
dsl_dataset_rele(ds, FTAG);
return (error);
}
void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
uint64_t used;
int64_t delta;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
dsl_dir_phys(dd)->dd_reserved = value;
if (dd->dd_parent != NULL) {
/* Roll up this additional usage into our ancestors */
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
delta, 0, 0, tx);
}
mutex_exit(&dd->dd_lock);
}
static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_RESERVATION),
(longlong_t)newval);
}
dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = reservation;
return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
dsl_dir_set_reservation_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
for (; ds1; ds1 = ds1->dd_parent) {
dsl_dir_t *dd;
for (dd = ds2; dd; dd = dd->dd_parent) {
if (ds1 == dd)
return (dd);
}
}
return (NULL);
}
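/*
 * Illustrative example (not from the original source): for pool/a/b and
 * pool/a/c/d the closest common ancestor is pool/a; for two dirs in the
 * same pool it is at worst the pool's root dir, so NULL can only come
 * back when an input is NULL or the dirs belong to different pools.
 */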
/*
* If delta is applied to dd, how much of that delta would be applied to
* ancestor? Syncing context only.
*/
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
if (dd == ancestor)
return (delta);
mutex_enter(&dd->dd_lock);
delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
mutex_exit(&dd->dd_lock);
return (would_change(dd->dd_parent, delta, ancestor));
}
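/*
 * Illustrative example (not from the original source), assuming
 * parent_delta() accounts MAX(used, reserved) as elsewhere in this file:
 * would_change(dd, -10G, pool) with dd_used_bytes == 10G and
 * dd_reserved == 4G yields roughly MAX(0, 4G) - MAX(10G, 4G) = -6G, i.e.
 * only 6G of the freed space is visible one level up; the recursion
 * repeats this at each level until 'ancestor' is reached.
 */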
typedef struct dsl_dir_rename_arg {
const char *ddra_oldname;
const char *ddra_newname;
cred_t *ddra_cred;
proc_t *ddra_proc;
} dsl_dir_rename_arg_t;
typedef struct dsl_valid_rename_arg {
int char_delta;
int nest_delta;
} dsl_valid_rename_arg_t;
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) dp;
dsl_valid_rename_arg_t *dvra = arg;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, namebuf);
ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
int namelen = strlen(namebuf) + dvra->char_delta;
int depth = get_dataset_depth(namebuf) + dvra->nest_delta;
if (namelen >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
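/*
 * Illustrative example (not from the original source): renaming pool/src
 * to pool/deep/dst gives char_delta = strlen("pool/deep/dst") -
 * strlen("pool/src") = 5 and nest_delta = 2 - 1 = 1, so every descendant
 * name grows by 5 characters and one nesting level; dsl_valid_rename()
 * rejects the rename if that would push any child past
 * ZFS_MAX_DATASET_NAME_LEN or zfs_max_dataset_nesting.
 */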
static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
dsl_valid_rename_arg_t dvra;
dsl_dataset_t *parentds;
objset_t *parentos;
const char *mynewname;
int error;
/* target dir should exist */
error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
if (error != 0)
return (error);
/* new parent should exist */
error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
&newparent, &mynewname);
if (error != 0) {
dsl_dir_rele(dd, FTAG);
return (error);
}
/* can't rename to different pool */
if (dd->dd_pool != newparent->dd_pool) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EXDEV));
}
/* new name should not already exist */
if (mynewname == NULL) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EEXIST));
}
/* can't rename below anything but filesystems (e.g. no ZVOLs) */
error = dsl_dataset_hold_obj(newparent->dd_pool,
dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
error = dmu_objset_from_ds(parentds, &parentos);
if (error != 0) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
dsl_dataset_rele(parentds, FTAG);
ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
dvra.char_delta = strlen(ddra->ddra_newname)
- strlen(ddra->ddra_oldname);
dvra.nest_delta = get_dataset_depth(ddra->ddra_newname)
- get_dataset_depth(ddra->ddra_oldname);
/* if the name length is growing, validate child name lengths */
if (dvra.char_delta > 0 || dvra.nest_delta > 0) {
error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
&dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
if (dmu_tx_is_syncing(tx)) {
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Although this is the check function and we don't
* normally make on-disk changes in check functions,
* we need to do that here.
*
* Ensure this portion of the tree's counts have been
* initialized in case the new parent has limits set.
*/
dsl_dir_init_fs_ss_count(dd, tx);
}
}
if (newparent != dd->dd_parent) {
/* is there enough space? */
uint64_t myspace =
MAX(dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_reserved);
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
if (dsl_dir_is_zapified(dd)) {
int err;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
/*
* have to add 1 for the filesystem itself that we're
* moving
*/
fs_cnt++;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
}
/* check for encryption errors */
error = dsl_dir_rename_crypt_check(dd, newparent);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EACCES));
}
/* no rename into our descendant */
if (closest_common_ancestor(dd, newparent) == dd) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_dir_transfer_possible(dd->dd_parent,
newparent, fs_cnt, ss_cnt, myspace,
ddra->ddra_cred, ddra->ddra_proc);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (0);
}
static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
const char *mynewname;
objset_t *mos = dp->dp_meta_objset;
VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
&mynewname));
/* Log this before we change the name. */
spa_history_log_internal_dd(dd, "rename", tx,
"-> %s", ddra->ddra_newname);
if (newparent != dd->dd_parent) {
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
/*
* We already made sure the dd counts were initialized in the
* check function.
*/
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt));
/* add 1 for the filesystem itself that we're moving */
fs_cnt++;
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt));
}
dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
-dsl_dir_phys(dd)->dd_used_bytes,
-dsl_dir_phys(dd)->dd_compressed_bytes,
-dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD,
dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_compressed_bytes,
dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
if (dsl_dir_phys(dd)->dd_reserved >
dsl_dir_phys(dd)->dd_used_bytes) {
uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
dsl_dir_phys(dd)->dd_used_bytes;
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
-unused_rsrv, 0, 0, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
unused_rsrv, 0, 0, tx);
}
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/* remove from old parent zapobj */
VERIFY0(zap_remove(mos,
dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
dd->dd_myname, tx));
(void) strlcpy(dd->dd_myname, mynewname,
sizeof (dd->dd_myname));
dsl_dir_rele(dd->dd_parent, dd);
dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
VERIFY0(dsl_dir_hold_obj(dp,
newparent->dd_object, NULL, dd, &dd->dd_parent));
/* add to new parent zapobj */
VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
dd->dd_myname, 8, 1, &dd->dd_object, tx));
/* TODO: A rename callback to avoid these layering violations. */
zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
ddra->ddra_newname, B_TRUE);
dsl_prop_notify_all(dd);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
}
int
dsl_dir_rename(const char *oldname, const char *newname)
{
dsl_dir_rename_arg_t ddra;
ddra.ddra_oldname = oldname;
ddra.ddra_newname = newname;
ddra.ddra_cred = CRED();
ddra.ddra_proc = curproc;
return (dsl_sync_task(oldname,
dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
3, ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space,
cred_t *cr, proc_t *proc)
{
dsl_dir_t *ancestor;
int64_t adelta;
uint64_t avail;
int err;
ancestor = closest_common_ancestor(sdd, tdd);
adelta = would_change(sdd, -space, ancestor);
avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
if (avail < space)
return (SET_ERROR(ENOSPC));
err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
ancestor, cr, proc);
if (err != 0)
return (err);
err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
ancestor, cr, proc);
if (err != 0)
return (err);
return (0);
}
inode_timespec_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
inode_timespec_t t;
mutex_enter(&dd->dd_lock);
t = dd->dd_snap_cmtime;
mutex_exit(&dd->dd_lock);
return (t);
}
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
inode_timespec_t t;
gethrestime(&t);
mutex_enter(&dd->dd_lock);
dd->dd_snap_cmtime = t;
if (spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_EXTENSIBLE_DATASET)) {
objset_t *mos = dd->dd_pool->dp_meta_objset;
uint64_t ddobj = dd->dd_object;
dsl_dir_zapify(dd, tx);
VERIFY0(zap_update(mos, ddobj,
- zfs_prop_to_name(ZFS_PROP_SNAPSHOTS_CHANGED),
+ DD_FIELD_SNAPSHOTS_CHANGED,
sizeof (uint64_t),
sizeof (inode_timespec_t) / sizeof (uint64_t),
&t, tx));
}
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}
boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
dmu_object_info_t doi;
dmu_object_info_from_db(dd->dd_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
void
dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa,
SPA_FEATURE_LIVELIST));
dsl_deadlist_open(&dd->dd_livelist, mos, obj);
bplist_create(&dd->dd_pending_allocs);
bplist_create(&dd->dd_pending_frees);
}
void
dsl_dir_livelist_close(dsl_dir_t *dd)
{
dsl_deadlist_close(&dd->dd_livelist);
bplist_destroy(&dd->dd_pending_allocs);
bplist_destroy(&dd->dd_pending_frees);
}
void
dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total)
{
uint64_t obj;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
livelist_condense_entry_t to_condense = spa->spa_to_condense;
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return;
/*
* If the livelist being removed is set to be condensed, stop the
* condense zthr and indicate the cancellation in the spa_to_condense
* struct in case the condense no-wait synctask has already started.
*/
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL &&
(to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) {
/*
* We use zthr_wait_cycle_done instead of zthr_cancel
* because we don't want to destroy the zthr, just have
* it skip its current task.
*/
spa->spa_to_condense.cancelled = B_TRUE;
zthr_wait_cycle_done(ll_condense_thread);
/*
* If we've returned from zthr_wait_cycle_done without
* clearing the to_condense data structure, then either the
* no-wait synctask has started (as indicated by the
* 'syncing' field of to_condense) and we can expect it to
* clear to_condense on its own, or we returned before the
* zthr ran. In the latter case the
* checkfunc will now fail as cancelled == B_TRUE so we
* can safely NULL out ds, allowing a different dir's
* livelist to be condensed.
*
* We can be sure that the to_condense struct will not
* be repopulated at this stage because both this
* function and dsl_livelist_try_condense execute in
* syncing context.
*/
if ((spa->spa_to_condense.ds != NULL) &&
!spa->spa_to_condense.syncing) {
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf,
spa);
spa->spa_to_condense.ds = NULL;
}
}
dsl_dir_livelist_close(dd);
VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj));
VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_LIVELIST, tx));
if (total) {
dsl_deadlist_free(dp->dp_meta_objset, obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
}
}
static int
dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds,
zfs_wait_activity_t activity, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&dd->dd_activity_lock));
switch (activity) {
case ZFS_WAIT_DELETEQ: {
#ifdef _KERNEL
objset_t *os;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
break;
mutex_enter(&os->os_user_ptr_lock);
void *user = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (dmu_objset_type(os) != DMU_OST_ZFS ||
user == NULL || zfs_get_vfs_flag_unmounted(os)) {
*in_progress = B_FALSE;
return (0);
}
uint64_t readonly = B_FALSE;
error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly,
NULL);
if (error != 0)
break;
if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) {
*in_progress = B_FALSE;
return (0);
}
uint64_t count, unlinked_obj;
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&unlinked_obj);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
break;
}
error = zap_count(os, unlinked_obj, &count);
if (error == 0)
*in_progress = (count != 0);
break;
#else
/*
* The delete queue is ZPL specific, and libzpool doesn't have
* it. It doesn't make sense to wait for it.
*/
(void) ds;
*in_progress = B_FALSE;
break;
#endif
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
int
dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
boolean_t *waited)
{
int error = 0;
boolean_t in_progress;
dsl_pool_t *dp = dd->dd_pool;
for (;;) {
dsl_pool_config_enter(dp, FTAG);
error = dsl_dir_activity_in_progress(dd, ds, activity,
&in_progress);
dsl_pool_config_exit(dp, FTAG);
if (error != 0 || !in_progress)
break;
*waited = B_TRUE;
if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
0 || dd->dd_activity_cancelled) {
error = SET_ERROR(EINTR);
break;
}
}
return (error);
}
void
dsl_dir_cancel_waiters(dsl_dir_t *dd)
{
mutex_enter(&dd->dd_activity_lock);
dd->dd_activity_cancelled = B_TRUE;
cv_broadcast(&dd->dd_activity_cv);
while (dd->dd_activity_waiters > 0)
cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
mutex_exit(&dd->dd_activity_lock);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 2b76bed1b69b..28afc3dead7e 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1,4509 +1,4494 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2016 Gary Mills
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/*
* Grand theory statement on scan queue sorting
*
* Scanning is implemented by recursively traversing all indirection levels
* in an object and reading all blocks referenced from said objects. This
* results in us approximately traversing the object from lowest logical
* offset to the highest. For best performance, we would want the logical
* blocks to be physically contiguous. However, this is frequently not the
* case with pools given the allocation patterns of copy-on-write filesystems.
* So instead, we put the I/Os into a reordering queue and issue them in a
* way that will most benefit physical disks (LBA-order).
*
* Queue management:
*
* Ideally, we would want to scan all metadata and queue up all block I/O
* prior to starting to issue it, because that allows us to do an optimal
* sorting job. This can however consume large amounts of memory. Therefore
* we continuously monitor the size of the queues and constrain them to 5%
* (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
* limit, we clear out a few of the largest extents at the head of the queues
* to make room for more scanning. Hopefully, these extents will be fairly
* large and contiguous, allowing us to approach sequential I/O throughput
* even without a fully sorted tree.
*
* Metadata scanning takes place in dsl_scan_visit(), which is called from
* dsl_scan_sync() every spa_sync(). If we have either fully scanned all
* metadata on the pool, or we need to make room in memory because our
* queues are too large, dsl_scan_visit() is postponed and
* scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
* that metadata scanning and queued I/O issuing are mutually exclusive. This
* allows us to provide maximum sequential I/O throughput for the majority of
* I/Os issued, since sequential I/O performance is significantly negatively
* impacted if it is interleaved with random I/O.
*
* Implementation Notes
*
* One side effect of the queued scanning algorithm is that the scanning code
* needs to be notified whenever a block is freed. This is needed to allow
* the scanning code to remove these I/Os from the issuing queue. Additionally,
* we do not attempt to queue gang blocks to be issued sequentially since this
* is very hard to do and would have an extremely limited performance benefit.
* Instead, we simply issue gang I/Os as soon as we find them using the legacy
* algorithm.
*
* Backwards compatibility
*
* This new algorithm is backwards compatible with the legacy on-disk data
* structures (and therefore does not require a new feature flag).
* Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
* will stop scanning metadata (in logical order) and wait for all outstanding
* sorted I/O to complete. Once this is done, we write out a checkpoint
* bookmark, indicating that we have scanned everything logically before it.
* If the pool is imported on a machine without the new sorting algorithm,
* the scan simply resumes from the last checkpoint using the legacy algorithm.
*/
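/*
 * Editorial sketch (not from the original source) of the resulting
 * per-txg control flow while a sorted scan is running:
 *
 *	dsl_scan_sync()
 *	  -> metadata not yet fully scanned && queues under the memory
 *	     limit: dsl_scan_visit()	(traverse datasets, queue + sort sios)
 *	  -> otherwise: scan_io_queues_run()	(issue queued I/O in LBA order)
 *	  -> once all sorted I/O has drained, write a checkpoint bookmark
 *	     via dsl_scan_sync_state()
 */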
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
const zbookmark_phys_t *);
static scan_cb_t dsl_scan_scrub_cb;
static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
extern int zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
/*
* By default zfs will check to ensure it is not over the hard memory
* limit before each txg. If finer-grained control of this is needed,
* this value can be set to 1 to enable checking before scanning each
* block.
*/
static int zfs_scan_strict_mem_lim = B_FALSE;
/*
* Maximum number of in-flight bytes per leaf vdev. We attempt
* to strike a balance here between keeping the vdev queues full of I/Os
* at all times and not overflowing the queues to cause long latency,
* which would cause long txg sync times. No matter what, we will not
* overload the drives with I/O, since that is protected by
* zfs_vdev_scrub_max_active.
*/
static unsigned long zfs_scan_vdev_limit = 4 << 20;
static int zfs_scan_issue_strategy = 0;
static int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
static int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
static int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */
static int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */
static int zfs_scrub_min_time_ms = 1000; /* min millis to scrub per txg */
static int zfs_obsolete_min_time_ms = 500; /* min millis to obsolete per txg */
static int zfs_free_min_time_ms = 1000; /* min millis to free per txg */
static int zfs_resilver_min_time_ms = 3000; /* min millis to resilver per txg */
static int zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static unsigned long zfs_async_block_max_blocks = ULONG_MAX;
/* max number of dedup blocks to free in a single TXG */
static unsigned long zfs_max_async_dedup_frees = 100000;
/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
* Unfortunately, it is a bit difficult to determine exactly how long
* this will take since userspace will trigger fs mounts asynchronously
* and the kernel will create zvol minors asynchronously. As a result,
* the value provided here is a bit arbitrary, but represents a
* reasonable estimate of how many txgs it will take to finish fully
* importing a pool.
*/
#define SCAN_IMPORT_WAIT_TXGS 5
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
* Enable/disable the processing of the free_bpobj object.
*/
static int zfs_free_bpobj_enabled = 1;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
NULL,
dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
uint64_t sds_dsobj;
uint64_t sds_txg;
avl_node_t sds_node;
} scan_ds_t;
/*
* This controls what conditions are placed on dsl_scan_sync_state():
* SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
* SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
* SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
* See dsl_scan_sync_state for details.
*/
typedef enum {
SYNC_OPTIONAL,
SYNC_MANDATORY,
SYNC_CACHED
} state_sync_type_t;
/*
* This struct represents the minimum information needed to reconstruct a
* zio for sequential scanning. This is useful because many of these will
* accumulate in the sequential IO queues before being issued, so saving
* memory matters here.
*/
typedef struct scan_io {
/* fields from blkptr_t */
uint64_t sio_blk_prop;
uint64_t sio_phys_birth;
uint64_t sio_birth;
zio_cksum_t sio_cksum;
uint32_t sio_nr_dvas;
/* fields from zio_t */
uint32_t sio_flags;
zbookmark_phys_t sio_zb;
/* members for queue sorting */
union {
avl_node_t sio_addr_node; /* link into issuing queue */
list_node_t sio_list_node; /* link for issuing to disk */
} sio_nodes;
/*
* There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
* depending on how many were in the original bp. Only the
* first DVA is really used for sorting and issuing purposes.
* The other DVAs (if provided) simply exist so that the zio
* layer can find additional copies to repair from in the
* event of an error. This array must go at the end of the
* struct to allow this for the variable number of elements.
*/
dva_t sio_dva[0];
} scan_io_t;
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define SIO_GET_END_OFFSET(sio) \
(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define SIO_GET_MUSED(sio) \
(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
zio_t *q_zio; /* scn_zio_root child for waiting on IO */
/* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr;
zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused;
uint64_t q_last_ext_addr;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
uint64_t q_inflight_bytes;
kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
/* per txg statistics */
uint64_t q_total_seg_size_this_txg;
uint64_t q_segs_this_txg;
uint64_t q_total_zio_size_this_txg;
uint64_t q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
zfs_refcount_t spc_refcnt; /* refcount for memory management */
dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
boolean_t spc_root; /* is this prefetch for an objset? */
uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */
} scan_prefetch_ctx_t;
/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
blkptr_t spic_bp; /* bp to prefetch */
zbookmark_phys_t spic_zb; /* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);
static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}
/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
ASSERT3U(nr_dvas, >, 0);
ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
void
scan_init(void)
{
/*
* This is used in ext_size_compare() to weight segments
* based on how sparse they are. This cannot be changed
* mid-scan and the tree comparison functions don't currently
* have a mechanism for passing additional context to the
* compare functions. Thus we store this value globally and
* we only allow it to be set at module initialization time.
*/
fill_weight = zfs_scan_fill_weight;
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
char name[36];
(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
sio_cache[i] = kmem_cache_create(name,
(sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
0, NULL, NULL, NULL, NULL, NULL, 0);
}
}
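/*
 * Editorial note: sio_cache[i] holds scan_io_t objects sized for i + 1
 * DVAs, so e.g. a single-copy block is allocated from "sio_cache_0" and a
 * triple-copy (3-DVA) block from "sio_cache_2"; sio_alloc() and sio_free()
 * pick the cache from sio_nr_dvas - 1.
 */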
void
scan_fini(void)
{
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
kmem_cache_destroy(sio_cache[i]);
}
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
return (scn->scn_phys.scn_state == DSS_SCANNING);
}
boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
return (dsl_scan_is_running(dp->dp_scan) &&
dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
bp->blk_fill = 1; /* we always only work with data pointers */
bp->blk_cksum = sio->sio_cksum;
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
sio->sio_blk_prop = bp->blk_prop;
sio->sio_phys_birth = bp->blk_phys_birth;
sio->sio_birth = bp->blk_birth;
sio->sio_cksum = bp->blk_cksum;
sio->sio_nr_dvas = BP_GET_NDVAS(bp);
/*
* Copy the DVAs to the sio. We need all copies of the block so
* that the self healing code can use the alternate copies if the
* first is corrupted. We want the DVA at index dva_i to be first
* in the sio since this is the primary one that we want to issue.
*/
for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
}
}
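/*
 * Illustrative example (not from the original source): for a bp with
 * three DVAs and dva_i == 1, the loop above stores them in the sio as
 * { DVA[1], DVA[2], DVA[0] }, so the copy being scanned sorts and issues
 * first while the other copies remain available for self-healing repair.
 */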
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
int err;
dsl_scan_t *scn;
spa_t *spa = dp->dp_spa;
uint64_t f;
scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
scn->scn_dp = dp;
/*
* It's possible that we're resuming a scan after a reboot, so
* make sure that the scan_async_destroying flag is initialized
* appropriately.
*/
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB). Limits for the issuing
* phase are done per top-level vdev and are handled separately.
*/
scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
sizeof (scan_prefetch_issue_ctx_t),
offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_func", sizeof (uint64_t), 1, &f);
if (err == 0) {
/*
* There was an old-style scrub in progress. Restart a
* new-style scrub from the beginning.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("old-style scrub was in progress for %s; "
"restarting new-style scrub in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
/*
* Load the queue obj from the old location so that it
* can be freed by dsl_scan_done().
*/
(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
/*
* Detect if the pool contains the signature of #2094. If it
* does, properly update the scn->scn_phys structure and notify
* the administrator by setting an errata for the pool.
*/
if (err == EOVERFLOW) {
uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
(23 * sizeof (uint64_t)));
err = zap_lookup(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
if (err == 0) {
uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
if (overflow & ~DSL_SCAN_FLAGS_MASK ||
scn->scn_async_destroying) {
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
return (EOVERFLOW);
}
memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
/* Required scrub already in progress. */
if (scn->scn_phys.scn_state == DSS_FINISHED ||
scn->scn_phys.scn_state == DSS_CANCELED)
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_SCRUB;
}
}
if (err == ENOENT)
return (0);
else if (err)
return (err);
/*
* We might be restarting after a reboot, so jump the issued
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
/*
* A new-type scrub was in progress on an old
* pool, and the pool was accessed by old
* software. Restart from the beginning, since
* the old software may have changed the pool in
* the meantime.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("new-style scrub for %s was modified "
"by old software; restarting in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
} else if (dsl_scan_resilvering(dp)) {
/*
* If a resilver is in progress and there are already
* errors, restart it instead of finishing this scan and
* then restarting it. If there haven't been any errors
* then remember that the incore DTL is valid.
*/
if (scn->scn_phys.scn_errors > 0) {
scn->scn_restart_txg = txg;
zfs_dbgmsg("resilver can't excise DTL_MISSING "
"when finished; restarting on %s in txg "
"%llu",
spa->spa_name,
(u_longlong_t)scn->scn_restart_txg);
} else {
/* it's safe to excise DTL when finished */
spa->spa_scrub_started = B_TRUE;
}
}
}
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
za.za_first_integer);
}
zap_cursor_fini(&zc);
}
spa_scan_stat_init(spa);
return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
if (dp->dp_scan != NULL) {
dsl_scan_t *scn = dp->dp_scan;
if (scn->scn_taskq != NULL)
taskq_destroy(scn->scn_taskq);
scan_ds_queue_clear(scn);
avl_destroy(&scn->scn_queue);
scan_ds_prefetch_queue_clear(scn);
avl_destroy(&scn->scn_prefetch_queue);
kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
dp->dp_scan = NULL;
}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
return (scn->scn_restart_txg != 0 &&
scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
(spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
return (scn_phys->scn_state == DSS_SCANNING &&
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
return (dsl_scan_scrubbing(scn->scn_dp) &&
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we can be running in the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
* (scn_queues_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
* mandatory, we do a hard ASSERT to make sure that the queues are empty. The
* third possible state is a "cached" sync. This is done in response to:
* 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
* destroyed, so we wouldn't be able to restart scanning from it.
* 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
* superseded by a newer snapshot.
* 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
* swapped with its clone.
* In all cases, a cached sync simply rewrites the last record we've written,
* just slightly modified. For the modifications that are performed to the
* last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
* dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
*/
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
int i;
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
if (scn->scn_queues_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
if (q == NULL)
continue;
mutex_enter(&vd->vdev_scan_io_queue_lock);
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
NULL);
ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
if (scn->scn_phys.scn_queue_obj != 0)
scan_ds_queue_sync(scn, tx);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
zfs_dbgmsg("finish scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_FALSE;
scn->scn_last_checkpoint = ddi_get_lbolt();
} else if (sync_type == SYNC_CACHED) {
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys_cached, tx));
}
}
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd))
return (SET_ERROR(EBUSY));
return (0);
}
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_phys.scn_max_txg = tx->tx_txg;
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
scn->scn_issued_before_pass = 0;
scn->scn_restart_txg = 0;
scn->scn_done_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* rewrite all disk labels */
vdev_config_dirty(spa->spa_root_vdev);
if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_START);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
}
spa->spa_scrub_started = B_TRUE;
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
/*
* When starting a resilver, clear any existing rebuild state.
* This is required to prevent stale rebuild status from
* being reported when a rebuild is run, then a resilver, and
* finally a scrub, in which case only the scrub status
* should be reported by 'zpool status'.
*/
if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
vdev_rebuild_clear_sync(
(void *)(uintptr_t)vd->vdev_id, tx);
}
}
}
/* back to the generic stuff */
if (zfs_scan_blkstats) {
if (dp->dp_blkstats == NULL) {
dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
}
memset(&dp->dp_blkstats->zab_type, 0,
sizeof (dp->dp_blkstats->zab_type));
} else {
if (dp->dp_blkstats) {
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
dp->dp_blkstats = NULL;
}
}
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
*funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
(u_longlong_t)scn->scn_phys.scn_max_txg);
}
/*
* Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
* Can also be called to resume a paused scrub.
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
spa_vdev_state_enter(spa, SCL_NONE);
spa->spa_scrub_reopen = B_TRUE;
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
if (func == POOL_SCAN_RESILVER) {
dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
return (0);
}
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (SET_ERROR(ECANCELED));
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
static const char *old_names[] = {
"scrub_bookmark",
"scrub_ddt_bookmark",
"scrub_ddt_class_max",
"scrub_queue",
"scrub_min_txg",
"scrub_max_txg",
"scrub_func",
"scrub_errors",
NULL
};
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int i;
/* Remove any remnants of an old-style scrub. */
for (i = 0; old_names[i]; i++) {
(void) zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
}
if (scn->scn_phys.scn_queue_obj != 0) {
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = 0;
}
scan_ds_queue_clear(scn);
scan_ds_prefetch_queue_clear(scn);
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
/*
* If we were "restarted" from a stopped state, don't bother
* with anything else.
*/
if (!dsl_scan_is_running(scn)) {
ASSERT(!scn->scn_is_sorted);
return;
}
if (scn->scn_is_sorted) {
scan_io_queues_destroy(scn);
scn->scn_is_sorted = B_FALSE;
if (scn->scn_taskq != NULL) {
taskq_destroy(scn->scn_taskq);
scn->scn_taskq = NULL;
}
}
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
/*
* If the scrub/resilver completed, update all DTLs to
* reflect this. Whether it succeeded or not, vacate
* all temporary scrub DTLs.
*
* As the scrub does not currently support traversing
* data that have been freed but are part of a checkpoint,
* we don't mark the scrub as done in the DTLs as faults
* may still exist in those vdevs.
*/
if (complete &&
!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
if (scn->scn_phys.scn_min_txg) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_FINISH);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_SCRUB_FINISH);
}
} else {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
0, B_TRUE, B_FALSE);
}
spa_errlog_rotate(spa);
/*
* Don't clear flag until after vdev_dtl_reassess to ensure that
* DTL_MISSING will get updated when possible.
*/
spa->spa_scrub_started = B_FALSE;
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
/*
* Clear any resilver_deferred flags in the config.
* If there are drives that need resilvering, kick
* off an asynchronous request to start resilver.
* vdev_clear_resilver_deferred() may update the config
* before the resilver can restart. In the event of
* a crash during this period, the spa loading code
* will find the drives that need to be resilvered
* and start the resilver then.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
(u_longlong_t)spa_get_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/* Clear recent error events (i.e. duplicate events tracking) */
if (complete)
zfs_ereport_clear(spa, NULL);
}
scn->scn_phys.scn_end_time = gethrestime_sec();
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_scan_is_running(scn));
}
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
if (!dsl_scan_scrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused scrub */
if (dsl_scan_is_paused_scrub(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* record when this pause of the scrub began */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
spa_notify_waiters(spa);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the scrub rate
* shown in the output of 'zpool status'.
*/
spa->spa_scan_pass_scrub_spent_paused +=
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
}
/*
* Set scrub pause/resume state if it makes sense to do so
*/
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
/* start a new scan, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
if (txg == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;
dmu_tx_commit(tx);
} else {
dp->dp_scan->scn_restart_txg = txg;
}
zfs_dbgmsg("restarting resilver for %s at txg=%llu",
dp->dp_spa->spa_name, (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
zio_free(dp->dp_spa, txg, bp);
}
void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
ASSERT(dsl_pool_sync_context(dp));
zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
static int
scan_ds_queue_compare(const void *a, const void *b)
{
const scan_ds_t *sds_a = a, *sds_b = b;
if (sds_a->sds_dsobj < sds_b->sds_dsobj)
return (-1);
if (sds_a->sds_dsobj == sds_b->sds_dsobj)
return (0);
return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
void *cookie = NULL;
scan_ds_t *sds;
while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
kmem_free(sds, sizeof (*sds));
}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
if (sds != NULL && txg != NULL)
*txg = sds->sds_txg;
return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
scan_ds_t *sds;
avl_index_t where;
sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
sds->sds_dsobj = dsobj;
sds->sds_txg = txg;
VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
VERIFY(sds != NULL);
avl_remove(&scn->scn_queue, sds);
kmem_free(sds, sizeof (*sds));
}
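/*
* Write the in-memory dataset work queue out to disk: free the old
* on-disk queue object, create a fresh ZAP, and add one entry per
* queued dataset.
*/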
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
ASSERT0(scn->scn_queues_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
DMU_OT_NONE, 0, tx);
for (scan_ds_t *sds = avl_first(&scn->scn_queue);
sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
VERIFY0(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
sds->sds_txg, tx));
}
}
/*
* Computes the memory limit state that we're currently in. A sorted scan
* needs quite a bit of memory to hold the sorting queue, so we need to
* reasonably constrain the size so it doesn't impact overall system
* performance. We compute two limits:
* 1) Hard memory limit: if the amount of memory used by the sorting
* queues on a pool gets above this value, we stop the metadata
* scanning portion and start issuing the queued up and sorted
* I/Os to reduce memory usage.
* This limit is calculated as a fraction of physmem (by default 5%).
* We constrain the lower bound of the hard limit to an absolute
* minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
* the upper bound to 5% of the total pool size - no chance we'll
* ever need that much memory, but just to keep the value in check.
* 2) Soft memory limit: once we hit the hard memory limit, we start
* issuing I/O to reduce queue memory usage, but we don't want to
* completely empty out the queues, since we might be able to find I/Os
* that will fill in the gaps of our non-sequential IOs at some point
* in the future. So we stop the issuing of I/Os once the amount of
* memory used drops below the soft limit (at which point we stop issuing
* I/O and start scanning metadata again).
*
* This limit is calculated by subtracting a fraction of the hard
* limit from the hard limit. By default this fraction is 5%, so
* the soft limit is 95% of the hard limit. We cap the size of the
* difference between the hard and soft limits at an absolute
* maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
* sufficient to not cause too frequent switching between the
* metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
* worth of queues is about 1.2 GiB of on-pool data, so scanning
* that should take at least a decent fraction of a second).
*/
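/*
* As a rough worked example using the defaults described above: on a
* machine with 16 GiB of physical memory (and a pool large enough that
* the 5%-of-alloc cap does not kick in), the hard limit is about
* 16 GiB / 20 = 819 MiB, and the soft limit is about
* 819 MiB - MIN(819 MiB / 20, 128 MiB) = 778 MiB.
*/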
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
uint64_t alloc, mlim_hard, mlim_soft, mused;
alloc = metaslab_class_get_alloc(spa_normal_class(spa));
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
zfs_scan_mem_lim_min);
mlim_hard = MIN(mlim_hard, alloc / 20);
mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
zfs_scan_mem_lim_soft_max);
mused = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
dsl_scan_io_queue_t *queue;
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
/*
* # of extents in exts_by_addr = # in exts_by_size.
* B-tree efficiency is ~75%, but can be as low as 50%.
*/
mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
3 / 2) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
ASSERT0(scn->scn_queues_pending);
/*
* If we are above our hard limit, we need to clear out memory.
* If we are below our soft limit, we need to accumulate sequential IOs.
* Otherwise, we should keep doing whatever we are currently doing.
*/
if (mused >= mlim_hard)
return (B_TRUE);
else if (mused < mlim_soft)
return (B_FALSE);
else
return (scn->scn_clearing);
}
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
if (scn->scn_suspending)
return (B_TRUE); /* we're already suspending */
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 and objset blocks. */
if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
return (B_FALSE);
/*
* We suspend if:
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
* or
* - the scan queue has reached its memory use limit
*/
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
(zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
dprintf("suspending at first available bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
zb->zb_objset, 0, 0, 0);
} else if (zb != NULL) {
dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
scn->scn_phys.scn_bookmark = *zb;
} else {
#ifdef ZFS_DEBUG
dsl_scan_phys_t *scnp = &scn->scn_phys;
dprintf("suspending at at DDT bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
}
scn->scn_suspending = B_TRUE;
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
} zil_scan_arg_t;
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
(void) zilog;
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* One block ("stubby") can be allocated a long time ago; we
* want to visit that one because it has been allocated
* (on-disk) even if it hasn't been claimed (even though for
* scrub there's nothing to do to it).
*/
if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
(void) zilog;
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
const lr_write_t *lr = (const lr_write_t *)lrc;
const blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) ||
bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* birth can be < claim_txg if this record's txg is
* already txg sync'ed (but this log block contains
* other records that are not synced)
*/
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
ASSERT(spa_writeable(dp->dp_spa));
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0)
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg, B_FALSE);
zil_free(zilog);
}
/*
* We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
* here is to sort the AVL tree by the order each block will be needed.
*/
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
return (zbookmark_compare(spc_a->spc_datablkszsec,
spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
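/*
* Drop a hold on a prefetch context; the context is freed when the last
* hold is released.
*/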
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
{
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
{
scan_prefetch_ctx_t *spc;
spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
zfs_refcount_create(&spc->spc_refcnt);
zfs_refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {
spc->spc_datablkszsec = dnp->dn_datablkszsec;
spc->spc_indblkshift = dnp->dn_indblkshift;
spc->spc_root = B_FALSE;
} else {
spc->spc_datablkszsec = 0;
spc->spc_indblkshift = 0;
spc->spc_root = B_TRUE;
}
return (spc);
}
static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
{
zfs_refcount_add(&spc->spc_refcnt, tag);
}
static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
void *cookie = NULL;
scan_prefetch_issue_ctx_t *spic = NULL;
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
&cookie)) != NULL) {
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
const zbookmark_phys_t *zb)
{
zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
dnode_phys_t tmp_dnp;
dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
if (zb->zb_objset != last_zb->zb_objset)
return (B_TRUE);
if ((int64_t)zb->zb_object < 0)
return (B_FALSE);
tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
if (zbookmark_subtree_completed(dnp, zb, last_zb))
return (B_TRUE);
return (B_FALSE);
}
static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
avl_index_t idx;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
return;
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET))
return;
if (dsl_scan_check_prefetch_resume(spc, zb))
return;
scan_prefetch_ctx_add_ref(spc, scn);
spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
spic->spic_spc = spc;
spic->spic_bp = *bp;
spic->spic_zb = *zb;
/*
* Add the IO to the queue of blocks to prefetch. This allows us to
* prioritize blocks that we will need first for the main traversal
* thread.
*/
mutex_enter(&spa->spa_scrub_lock);
if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
/* this block is already queued for prefetch */
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
scan_prefetch_ctx_rele(spc, scn);
mutex_exit(&spa->spa_scrub_lock);
return;
}
avl_insert(&scn->scn_prefetch_queue, spic, idx);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int i;
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
return;
SET_BOOKMARK(&zb, objset, object, 0, 0);
spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
for (i = 0; i < dnp->dn_nblkptr; i++) {
zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
zb.zb_blkid = i;
dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zb.zb_level = 0;
zb.zb_blkid = DMU_SPILL_BLKID;
dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
}
scan_prefetch_ctx_rele(spc, FTAG);
}
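/*
* arc_read() completion callback for prefetch I/Os: account for the
* finished read, then queue prefetches for the children of indirect
* blocks, dnode blocks and objset blocks.
*/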
static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
(void) zio;
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
/* broadcast that the IO has completed for rate limiting purposes */
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
/* if there was an error or we are done prefetching, just cleanup */
if (buf == NULL || scn->scn_prefetch_stop)
goto out;
if (BP_GET_LEVEL(bp) > 0) {
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t czb;
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1, zb->zb_blkid * epb + i);
dsl_scan_prefetch(spc, cbp, &czb);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_prefetch_dnode(scn, cdnp,
zb->zb_objset, zb->zb_blkid * epb + i);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
objset_phys_t *osp = buf->b_data;
dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
dsl_scan_prefetch_dnode(scn,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
out:
if (buf != NULL)
arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
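/*
* Body of the scan prefetch thread: issue queued prefetch reads
* asynchronously while staying under scn_maxinflight_bytes, and drain
* whatever is left once we are told to stop.
*/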
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_t *scn = arg;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
/* loop until we are told to stop */
while (!scn->scn_prefetch_stop) {
arc_flags_t flags = ARC_FLAG_NOWAIT |
ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
mutex_enter(&spa->spa_scrub_lock);
/*
* Wait until we have an IO to issue and are not above our
* maximum in flight limit.
*/
while (!scn->scn_prefetch_stop &&
(avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
}
/* recheck if we should stop since we waited for the cv */
if (scn->scn_prefetch_stop) {
mutex_exit(&spa->spa_scrub_lock);
break;
}
/* remove the prefetch IO from the tree */
spic = avl_first(&scn->scn_prefetch_queue);
spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
avl_remove(&scn->scn_prefetch_queue, spic);
mutex_exit(&spa->spa_scrub_lock);
if (BP_IS_PROTECTED(&spic->spic_bp)) {
ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
zio_flags |= ZIO_FLAG_RAW;
}
/* issue the prefetch asynchronously */
(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
&spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT(scn->scn_prefetch_stop);
/* free any prefetches we didn't get to complete */
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
avl_remove(&scn->scn_prefetch_queue, spic);
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
/*
* We never skip over user/group accounting objects (obj<0)
*/
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
(int64_t)zb->zb_object >= 0) {
/*
* If we already visited this bp & everything below (in
* a prior txg sync), don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb,
&scn->scn_phys.scn_bookmark))
return (B_TRUE);
/*
* If we found the block we're trying to resume from, or
* we went past it, zero it out to indicate that it's OK
* to start checking for suspending again.
*/
if (zbookmark_subtree_tbd(dnp, zb,
&scn->scn_phys.scn_bookmark)) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
}
static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
/*
* Return nonzero on i/o error.
*/
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
ASSERT(!BP_IS_REDACTED(bp));
/*
* There is an unlikely case of encountering dnodes with a contradicting
* dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag in files created or
* modified before commit 4254acb was merged. As it is not possible to
* know which of the two is correct, report an error.
*/
if (dnp != NULL &&
dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb);
return (SET_ERROR(EINVAL));
}
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
dsl_scan_visitbp(cbp, &czb, dnp,
ds, scn, ostype, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
arc_buf_t *buf;
if (BP_IS_PROTECTED(bp)) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
zio_flags |= ZIO_FLAG_RAW;
}
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_visitdnode(scn, ds, ostype,
cdnp, zb->zb_blkid * epb + i, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
osp = buf->b_data;
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
* We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_userused_dnode,
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
} else if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
/*
* Sanity check the block pointer contents; for the cases above
* this is handled by arc_read().
*/
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb);
return (SET_ERROR(EINVAL));
}
return (0);
}
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
dmu_objset_type_t ostype, dnode_phys_t *dnp,
uint64_t object, dmu_tx_t *tx)
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
dnp->dn_nlevels - 1, j);
dsl_scan_visitbp(&dnp->dn_blkptr[j],
&czb, dnp, ds, scn, ostype, tx);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
0, DMU_SPILL_BLKID);
dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
&czb, dnp, ds, scn, ostype, tx);
}
}
/*
* The arguments are in this order because mdb can only print the
* first 5; we want them to be useful.
*/
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
blkptr_t *bp_toread = NULL;
if (dsl_scan_check_suspend(scn, zb))
return;
if (dsl_scan_check_resume(scn, dnp, zb))
return;
scn->scn_visited_this_txg++;
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
}
if (BP_IS_REDACTED(bp)) {
ASSERT(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
return;
}
- /*
- * Check if this block contradicts any filesystem flags.
- */
- spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
- if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
- ASSERT3B(dsl_dataset_feature_is_active(ds, f), ==, B_TRUE);
-
- f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
- if (f != SPA_FEATURE_NONE)
- ASSERT3B(dsl_dataset_feature_is_active(ds, f), ==, B_TRUE);
-
- f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
- if (f != SPA_FEATURE_NONE)
- ASSERT3B(dsl_dataset_feature_is_active(ds, f), ==, B_TRUE);
-
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
*bp_toread = *bp;
if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
goto out;
/*
* If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
goto out;
}
/*
* If this block is from the future (after cur_max_txg), then we
* are doing this on behalf of a deleted snapshot, and we will
* revisit the future block on the next pass of this dataset.
* Don't scan it now unless we need to because something
* under it was modified.
*/
if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
goto out;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
out:
kmem_free(bp_toread, sizeof (blkptr_t));
}
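/*
* Visit a dataset's root block pointer (or the MOS root when ds is NULL):
* set up the prefetch bookmark, prime the prefetch queue, then descend
* with dsl_scan_visitbp().
*/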
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
dmu_tx_t *tx)
{
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
SET_BOOKMARK(&scn->scn_prefetch_bookmark,
zb.zb_objset, 0, 0, 0);
} else {
scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
}
scn->scn_objsets_visited_this_txg++;
spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
dsl_scan_prefetch(spc, bp, &zb);
scan_prefetch_ctx_rele(spc, FTAG);
dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
dprintf_ds(ds, "finished scan%s", "");
}
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
if (ds->ds_is_snapshot) {
/*
* Note:
* - scn_cur_{min,max}_txg stays the same.
* - Setting the flag is not really necessary if
* scn_cur_max_txg == scn_max_txg, because there
* is nothing after this snapshot that we care
* about. However, we set it anyway and then
* ignore it when we retraverse it in
* dsl_scan_visitds().
*/
scn_phys->scn_bookmark.zb_objset =
dsl_dataset_phys(ds)->ds_next_snap_obj;
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
} else {
SET_BOOKMARK(&scn_phys->scn_bookmark,
ZB_DESTROYED_OBJSET, 0, 0, 0);
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset bookmark to -1,0,0,0",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name);
}
}
}
/*
* Invoked when a dataset is destroyed. We need to make sure that:
*
* 1) If it is the dataset that was currently being scanned, we write
* a new dsl_scan_phys_t and mark the objset reference in it
* as destroyed.
* 2) Remove it from the work queue, if it was present.
*
* If the dataset was actually a snapshot, instead of marking the dataset
* as destroyed, we instead substitute the next snapshot in line.
*/
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_destroyed_scn_phys(ds, &scn->scn_phys);
ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
if (ds->ds_is_snapshot)
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
if (ds->ds_is_snapshot) {
/*
* We keep the same mintxg; it could be >
* ds_creation_txg if the previous snapshot was
* deleted too.
*/
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
mintxg, tx) == 0);
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
} else {
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"removing",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name);
}
}
/*
* dsl_scan_sync() should be called after this, and should sync
* out our changed state, but just to be safe, do it here.
*/
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds->ds_object) {
scn_bookmark->zb_objset =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
}
/*
* Called when a dataset is snapshotted. If we were currently traversing
* this snapshot, we reset our bookmark to point at the newly created
* snapshot. We also modify our work queue to remove the old snapshot and
* replace with the new one.
*/
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds1->ds_object) {
scn_bookmark->zb_objset = ds2->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds1->ds_object,
ds1->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (scn_bookmark->zb_objset == ds2->ds_object) {
scn_bookmark->zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds2->ds_object,
ds2->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
}
/*
* Called when an origin dataset and its clone are swapped. If we were
* currently traversing the dataset, we need to switch to traversing the
* newly promoted clone.
*/
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds1->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg1, mintxg2;
boolean_t ds1_queued, ds2_queued;
if (!dsl_scan_is_running(scn))
return;
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
/*
* Handle the in-memory scan queue.
*/
ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* The swapping code below would not handle this case correctly,
* since we can't insert ds2 if it is already there. That's
* because scan_ds_queue_insert() prohibits a duplicate insert
* and panics.
*/
} else if (ds1_queued) {
scan_ds_queue_remove(scn, ds1->ds_object);
scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
} else if (ds2_queued) {
scan_ds_queue_remove(scn, ds2->ds_object);
scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
}
/*
* Handle the on-disk scan queue.
* The on-disk state is an out-of-date version of the in-memory state,
* so the in-memory and on-disk values for ds1_queued and ds2_queued may
* be different. Therefore we need to apply the swap logic to the
* on-disk state independently of the in-memory state.
*/
ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* Alternatively, we could check for EEXIST from
* zap_add_int_key() and back out to the original state, but
* that would be more work than checking for this case upfront.
*/
} else if (ds1_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds1->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (ds2_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds2->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
uint64_t originobj = *(uint64_t *)arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
return (0);
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (err)
return (err);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (scn->scn_phys.scn_cur_min_txg >=
scn->scn_phys.scn_max_txg) {
/*
* This can happen if this snapshot was created after the
* scan started, and we already completed a previous snapshot
* that was created after the scan started. This snapshot
* only references blocks with:
*
* birth < our ds_creation_txg
* cur_min_txg is no less than ds_creation_txg.
* We have already visited these blocks.
* or
* birth > scn_max_txg
* The scan requested not to visit these blocks.
*
* Subsequent snapshots (and clones) can reference our
* blocks, or blocks with even higher birth times.
* Therefore we do not need to visit them either,
* so we do not add them to the work queue.
*
* Note that checking for cur_min_txg >= cur_max_txg
* is not sufficient, because in that case we may need to
* visit subsequent snapshots. This happens when min_txg > 0,
* which raises cur_min_txg. In this case we will visit
* this dataset but skip all of its blocks, because the
* rootbp's birth time is < cur_min_txg. Then we will
* add the next snapshots/clones to the work queue.
*/
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
"cur_min_txg (%llu) >= max_txg (%llu)",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_max_txg);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
goto out;
}
/*
* Only the ZIL in the head (non-snapshot) is valid. Even though
* snapshots can have ZIL block pointers (which may be the same
* BP as in the head), they must be ignored. In addition, $ORIGIN
* doesn't have an objset (i.e. its ds_bp is a hole) so we don't
* need to look for a ZIL in it either. So we traverse the ZIL here,
* rather than in dsl_scan_recurse(), because the regular snapshot
* block-sharing rules don't apply to it.
*/
if (!dsl_dataset_is_snapshot(ds) &&
(dp->dp_origin_snap == NULL ||
ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
objset_t *os;
if (dmu_objset_from_ds(ds, &os) != 0) {
goto out;
}
dsl_scan_zil(dp, &os->os_zil_header);
}
/*
* Iterate over the bps in this ds.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_cur_max_txg,
(int)scn->scn_suspending);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
if (scn->scn_suspending)
goto out;
/*
* We've finished this pass over this dataset. If we did not
* completely visit it, do another pass.
*/
if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
zfs_dbgmsg("incomplete pass on %s; visiting again",
dp->dp_spa->spa_name);
scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
scan_ds_queue_insert(scn, ds->ds_object,
scn->scn_phys.scn_cur_max_txg);
goto out;
}
/*
* Add descendant datasets to work queue.
*/
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj,
dsl_dataset_phys(ds)->ds_creation_txg);
}
if (dsl_dataset_phys(ds)->ds_num_children > 1) {
boolean_t usenext = B_FALSE;
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count;
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
int err = zap_count(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
if (err == 0 &&
count == dsl_dataset_phys(ds)->ds_num_children - 1)
usenext = B_TRUE;
}
if (usenext) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
dsl_dataset_phys(ds)->ds_creation_txg);
}
zap_cursor_fini(&zc);
} else {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_clones_cb, &ds->ds_object,
DS_FIND_CHILDREN));
}
}
out:
dsl_dataset_rele(ds, FTAG);
}
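/*
* dmu_objset_find_dp() callback used to populate the scan queue: walk
* each head dataset back through its snapshot chain and enqueue the
* oldest one; clones are skipped here.
*/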
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
(void) arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(prev, FTAG);
return (0);
}
dsl_dataset_rele(ds, FTAG);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx)
{
(void) tx;
const ddt_key_t *ddk = &dde->dde_key;
ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
if (!dsl_scan_is_running(scn))
return;
/*
* This function is special because it is the only thing
* that can add scan_io_t's to the vdev scan queues from
* outside dsl_scan_sync(). For the most part this is ok
* as long as it is called from within syncing context.
* However, dsl_scan_sync() expects that no new sio's will
* be added between when all the work for a scan is done
* and the next txg when the scan is actually marked as
* completed. This check ensures we do not issue new sio's
* during this period.
*/
if (scn->scn_done_txg != 0)
return;
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue;
ddt_bp_create(checksum, ddk, ddp, &bp);
scn->scn_visited_this_txg++;
scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
}
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (enum ddt_class)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_entry_t dde = {{{{0}}}};
int error;
uint64_t n = 0;
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
break;
dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
(longlong_t)ddb->ddb_class,
(longlong_t)ddb->ddb_type,
(longlong_t)ddb->ddb_checksum,
(longlong_t)ddb->ddb_cursor);
/* There should be no pending changes to the dedup table */
ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
ASSERT(avl_first(&ddt->ddt_tree) == NULL);
dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
n++;
if (dsl_scan_check_suspend(scn, NULL))
break;
}
zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
"suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
(int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
ASSERT(error == 0 || error == ENOENT);
ASSERT(error != ENOENT ||
ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
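/*
* Return the highest txg this scan should visit for the given dataset;
* for snapshots this is capped at the snapshot's creation txg.
*/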
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
if (ds->ds_is_snapshot)
return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
return (smt);
}
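/*
* Main per-sync traversal: finish any remaining DDT class walk first,
* then the MOS (or the dataset we suspended on), then keep pulling
* datasets off the in-memory work queue until we suspend or run out
* of work.
*/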
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
scan_ds_t *sds;
dsl_pool_t *dp = scn->scn_dp;
if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
scn->scn_phys.scn_ddt_class_max) {
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_ddt(scn, tx);
if (scn->scn_suspending)
return;
}
if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
/* First do the MOS & ORIGIN */
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
if (scn->scn_suspending)
return;
if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_cb, NULL, DS_FIND_CHILDREN));
} else {
dsl_scan_visitds(scn,
dp->dp_origin_snap->ds_object, tx);
}
ASSERT(!scn->scn_suspending);
} else if (scn->scn_phys.scn_bookmark.zb_objset !=
ZB_DESTROYED_OBJSET) {
uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
/*
* If we were suspended, continue from here. Note if the
* ds we were suspended on was deleted, the zb_objset may
* be -1, so we will skip this and find a new objset
* below.
*/
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/*
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
* persistent zap-object-as-queue happen only at checkpoints.
*/
while ((sds = avl_first(&scn->scn_queue)) != NULL) {
dsl_dataset_t *ds;
uint64_t dsobj = sds->sds_dsobj;
uint64_t txg = sds->sds_txg;
/* dequeue and free the ds from the queue */
scan_ds_queue_remove(scn, dsobj);
sds = NULL;
/* set up min / max txg */
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (txg != 0) {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg, txg);
} else {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
}
scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
dsl_dataset_rele(ds, FTAG);
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/* No more objsets to fetch, we're done */
scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
ASSERT0(scn->scn_suspending);
}
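/*
* Count data disks across all top-level vdevs, excluding log, spare and
* l2cache vdevs and not counting parity disks.
*/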
static uint64_t
dsl_scan_count_data_disks(vdev_t *rvd)
{
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
continue;
leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
}
return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
int i;
uint64_t cur_size = 0;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
}
q->q_total_zio_size_this_txg += cur_size;
q->q_zios_this_txg++;
}
static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
uint64_t end)
{
q->q_total_seg_size_this_txg += end - start;
q->q_segs_this_txg++;
}
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
/* See comment in dsl_scan_check_suspend() */
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
/*
* Given a list of scan_io_t's in io_list, this issues the I/Os out to
* disk. This consumes the io_list and frees the scan_io_t's. This is
* called when emptying queues, either when we're up against the memory
* limit or when we have finished scanning. Returns B_TRUE if we stopped
* processing the list before we finished. Any sios that were not issued
* will remain in the io_list.
*/
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;
if (scan_io_queue_check_suspend(scn)) {
suspended = B_TRUE;
break;
}
sio2bp(sio, &bp);
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
sio_free(sio);
}
return (suspended);
}
/*
* This function removes sios that reside within a given range_seg_t
* from an IO queue and inserts them (in offset order) into a list. Note
* that we only ever process a maximum of 32 sios at once. If there are
* sios left in the segment that did not make it onto the list, we return
* B_TRUE; otherwise B_FALSE.
*/
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
scan_io_t *srch_sio, *sio, *next_sio;
avl_index_t idx;
uint_t num_sios = 0;
int64_t bytes_issued = 0;
ASSERT(rs != NULL);
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
srch_sio = sio_alloc(1);
srch_sio->sio_nr_dvas = 1;
SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
/*
* The exact start of the extent might not contain any matching zios,
* so if that's the case, examine the next one in the tree.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr) && num_sios <= 32) {
ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
queue->q_exts_by_addr));
ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
queue->q_exts_by_addr));
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
bytes_issued += SIO_GET_ASIZE(sio);
num_sios++;
list_insert_tail(list, sio);
sio = next_sio;
}
/*
* We limit the number of sios we process at once to 32 to avoid
* biting off more than we can chew. If we didn't take everything
* in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely.
*/
if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr)) {
range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued);
range_tree_resize_segment(queue->q_exts_by_addr, rs,
SIO_GET_OFFSET(sio), rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
return (B_TRUE);
} else {
uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
queue->q_last_ext_addr = -1;
return (B_FALSE);
}
}
/*
* This is called from the queue emptying thread and selects the next
* extent from which we are to issue I/Os. The behavior of this function
* depends on the state of the scan, the current memory consumption and
* whether or not we are performing a scan shutdown.
* 1) We select extents in an elevator algorithm (LBA-order) if the scan
* needs to perform a checkpoint.
* 2) We select the largest available extent if we are up against the
* memory limit.
* 3) Otherwise we don't select any extents.
*/
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
range_tree_t *rt = queue->q_exts_by_addr;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
if (!scn->scn_checkpointing && !scn->scn_clearing)
return (NULL);
/*
* During normal clearing, we want to issue our largest segments
* first, keeping IO as sequential as possible, and leaving the
* smaller extents for later with the hope that they might eventually
* grow to larger sequential segments. However, when the scan is
* checkpointing, no new extents will be added to the sorting queue,
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
zfs_scan_issue_strategy == 1)
return (range_tree_first(rt));
/*
* Try to continue the previous extent if it is not completed yet. After
* being shrunk in scan_io_queue_gather() it may no longer be the best,
* but otherwise we would leave a shorter remnant every txg.
*/
uint64_t start;
uint64_t size = 1 << rt->rt_shift;
range_seg_t *addr_rs;
if (queue->q_last_ext_addr != -1) {
start = queue->q_last_ext_addr;
addr_rs = range_tree_find(rt, start, size);
if (addr_rs != NULL)
return (addr_rs);
}
/*
* Nothing to continue, so find new best extent.
*/
uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
if (v == NULL)
return (NULL);
queue->q_last_ext_addr = start = *v << rt->rt_shift;
/*
* We need to get the original entry in the by_addr tree so we can
* modify it.
*/
addr_rs = range_tree_find(rt, start, size);
ASSERT3P(addr_rs, !=, NULL);
ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
ASSERT3U(rs_get_end(addr_rs, rt), >, start);
return (addr_rs);
}
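/*
* Taskq callback that drains a single top-level vdev's scan IO queue:
* fetch extents, gather and issue their sios, and requeue anything left
* over if we had to suspend.
*/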
static void
scan_io_queues_run_one(void *arg)
{
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
range_seg_t *rs;
scan_io_t *sio;
zio_t *zio;
list_t sio_list;
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
mutex_enter(q_lock);
queue->q_zio = zio;
/* Calculate maximum in-flight bytes for this vdev. */
queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
(vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
/* reset per-queue scan statistics for this txg */
queue->q_total_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_total_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
boolean_t more_left;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
do {
scan_io_t *first_sio, *last_sio;
/*
* We have selected which extent needs to be
* processed next. Gather up the corresponding sios.
*/
more_left = scan_io_queue_gather(queue, rs, &sio_list);
ASSERT(!list_is_empty(&sio_list));
first_sio = list_head(&sio_list);
last_sio = list_tail(&sio_list);
seg_end = SIO_GET_END_OFFSET(last_sio);
if (seg_start == 0)
seg_start = SIO_GET_OFFSET(first_sio);
/*
* Issuing sios can take a long time so drop the
* queue lock. The sio queue won't be updated by
* other threads since we're in syncing context so
* we can be sure that our trees will remain exactly
* as we left them.
*/
mutex_exit(q_lock);
suspended = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);
if (suspended)
break;
} while (more_left);
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
if (suspended)
break;
}
/*
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
while ((sio = list_head(&sio_list)) != NULL) {
list_remove(&sio_list, sio);
scan_io_queue_insert_impl(queue, sio);
}
queue->q_zio = NULL;
mutex_exit(q_lock);
zio_nowait(zio);
list_destroy(&sio_list);
}
/*
* Performs an emptying run on all scan queues in the pool. This just
* punches out one thread per top-level vdev, each of which processes
* only that vdev's scan queue. We can parallelize the I/O here because
* we know that each queue's I/Os only affect its own top-level vdev.
*
* This function waits for the queue runs to complete, and must be
* called from dsl_scan_sync (or in general, syncing context).
*/
static void
scan_io_queues_run(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (scn->scn_queues_pending == 0)
return;
if (scn->scn_taskq == NULL) {
int nthreads = spa->spa_root_vdev->vdev_children;
/*
* We need to make this taskq *always* execute as many
* threads in parallel as we have top-level vdevs and no
* less, otherwise strange serialization of the calls to
* scan_io_queues_run_one can occur during spa_sync runs
* and that significantly impacts performance.
*/
scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
mutex_enter(&vd->vdev_scan_io_queue_lock);
if (vd->vdev_scan_io_queue != NULL) {
VERIFY(taskq_dispatch(scn->scn_taskq,
scan_io_queues_run_one, vd->vdev_scan_io_queue,
TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* Wait for the queues to finish issuing their IOs for this run
* before we return. There may still be IOs in flight at this
* point.
*/
taskq_wait(scn->scn_taskq);
}
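/*
 * Decide whether an async block-freeing pass should stop for this txg.
 * We pause once the per-txg block or dedup-free limits have been hit,
 * once this sync has run longer than zfs_txg_timeout, once a txg sync
 * is waiting and the minimum time (scn_async_block_min_time_ms) has
 * elapsed, or when the pool is shutting down. With zfs_recover set we
 * never pause.
 */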
static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
uint64_t elapsed_nanosecs;
if (zfs_recover)
return (B_FALSE);
if (zfs_async_block_max_blocks != 0 &&
scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
return (B_TRUE);
}
if (zfs_max_async_dedup_frees != 0 &&
scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
return (B_TRUE);
}
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
txg_sync_waiting(scn->scn_dp)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_scan_t *scn = arg;
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
dmu_tx_get_txg(tx), bp, 0));
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
scn->scn_visited_this_txg++;
if (BP_GET_DEDUP(bp))
scn->scn_dedup_frees_this_txg++;
return (0);
}
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t i;
uint64_t seg_size_total = 0, zio_size_total = 0;
uint64_t seg_count_total = 0, zio_count_total = 0;
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
if (queue == NULL)
continue;
seg_size_total += queue->q_total_seg_size_this_txg;
zio_size_total += queue->q_total_zio_size_this_txg;
seg_count_total += queue->q_segs_this_txg;
zio_count_total += queue->q_zios_this_txg;
}
if (seg_count_total == 0 || zio_count_total == 0) {
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_zios_this_txg = 0;
return;
}
scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
scn->scn_segs_this_txg = seg_count_total;
scn->scn_zios_this_txg = zio_count_total;
}
static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (dsl_scan_free_block_cb(arg, bp, tx));
}
static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
dsl_scan_t *scn = arg;
const dva_t *dva = &bp->blk_dva[0];
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
DVA_GET_ASIZE(dva), tx);
scn->scn_visited_this_txg++;
return (0);
}
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t used = 0, comp, uncomp;
boolean_t clones_left;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
(scn->scn_async_destroying && !scn->scn_async_stalled))
return (B_TRUE);
if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
&used, &comp, &uncomp);
}
clones_left = spa_livelist_delete_check(spa);
return ((used != 0) || (clones_left));
}
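/*
 * Recursively walk the vdev tree under vd and return B_TRUE if any
 * concrete leaf vdev has not deferred its resilver, i.e. there is at
 * least one device we could issue resilver I/O to right now.
 */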
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
boolean_t need_resilver = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++) {
need_resilver |=
dsl_scan_check_deferred(vd->vdev_child[c]);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (need_resilver);
if (!vd->vdev_resilver_deferred)
need_resilver = B_TRUE;
return (need_resilver);
}
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_t *vd;
vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops == &vdev_indirect_ops) {
/*
* The indirect vdev can point to multiple
* vdevs. For simplicity, always create
* the resilver zio_t. zio_vdev_io_start()
* will bypass the child resilver i/o's if
* they are on vdevs that don't have DTL's.
*/
return (B_TRUE);
}
if (DVA_GET_GANG(dva)) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
return (B_TRUE);
}
/*
* Check if the top-level vdev must resilver this offset.
* When the offset does not intersect with a dirty leaf DTL
* then it may be possible to skip the resilver IO. The psize
* is provided instead of asize to simplify the check for RAIDZ.
*/
if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
return (B_FALSE);
/*
* Check that this top-level vdev has a device under it which
* is resilvering and is not deferred.
*/
if (!dsl_scan_check_deferred(vd))
return (B_FALSE);
return (B_TRUE);
}
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
int err = 0;
if (spa_suspend_async_destroy(spa))
return (0);
if (zfs_free_bpobj_enabled &&
spa_version(spa) >= SPA_VERSION_DEADLISTS) {
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bpobj_iterate(&dp->dp_free_bpobj,
bpobj_dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
}
if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
ASSERT(scn->scn_async_destroying);
scn->scn_is_bptree = B_TRUE;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bptree_iterate(dp->dp_meta_objset,
dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err == EIO || err == ECKSUM) {
err = 0;
} else if (err != 0 && err != ERESTART) {
zfs_panic_recover("error %u from "
"traverse_dataset_destroyed()", err);
}
if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
/* finished; deactivate async destroy feature */
spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
ASSERT(!spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY));
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, tx));
VERIFY0(bptree_free(dp->dp_meta_objset,
dp->dp_bptree_obj, tx));
dp->dp_bptree_obj = 0;
scn->scn_async_destroying = B_FALSE;
scn->scn_async_stalled = B_FALSE;
} else {
/*
* If we didn't make progress, mark the async
* destroy as stalled, so that we will not initiate
* a spa_sync() on its behalf. Note that we only
* check this if we are not finished, because if the
* bptree had no blocks for us to visit, we can
* finish without "making progress".
*/
scn->scn_async_stalled =
(scn->scn_visited_this_txg == 0);
}
}
if (scn->scn_visited_this_txg) {
zfs_dbgmsg("freed %llu blocks in %llums from "
"free_bpobj/bptree on %s in txg %llu; err=%u",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)
NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
spa->spa_name, (longlong_t)tx->tx_txg, err);
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
/*
* Write out changes to the DDT that may be required as a
* result of the blocks freed. This ensures that the DDT
* is clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
zfs_free_leak_on_eio &&
(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
/*
* We have finished background destroying, but there is still
* some space left in the dp_free_dir. Transfer this leaked
* space to the dp_leak_dir.
*/
if (dp->dp_leak_dir == NULL) {
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
LEAK_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
LEAK_DIR_NAME, &dp->dp_leak_dir));
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
-dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
}
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
!spa_livelist_delete_check(spa)) {
/* finished; verify that space accounting went to zero */
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
spa_notify_waiters(spa);
EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ));
if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_OBSOLETE_COUNTS));
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
err = bpobj_iterate(&dp->dp_obsolete_bpobj,
dsl_scan_obsolete_block_cb, scn, tx);
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
dsl_pool_destroy_obsolete_bpobj(dp, tx);
}
return (0);
}
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
* can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
*/
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
int err = 0;
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
state_sync_type_t sync_type = SYNC_OPTIONAL;
if (spa->spa_resilver_deferred &&
!spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
* imported (see dsl_scan_init). We also restart scans if there
* is a deferred resilver and the user has manually disabled
* deferred resilvers via the tunable.
*/
if (dsl_scan_restarting(scn, tx) ||
(spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
dsl_scan_setup_sync(&func, tx);
}
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
/*
* If the scan is inactive due to a stalled async destroy, try again.
*/
if (!scn->scn_async_stalled && !dsl_scan_active(scn))
return;
/* reset scan statistics */
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
scn->scn_holes_this_txg = 0;
scn->scn_lt_min_this_txg = 0;
scn->scn_gt_max_this_txg = 0;
scn->scn_ddt_contained_this_txg = 0;
scn->scn_objsets_visited_this_txg = 0;
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_zios_this_txg = 0;
scn->scn_suspending = B_FALSE;
scn->scn_sync_start_time = gethrtime();
spa->spa_scrub_active = B_TRUE;
/*
* First process the async destroys. If we suspend, don't do
* any scrubbing or resilvering. This ensures that there are no
* async destroys while we are scanning, so the scan code doesn't
* have to worry about traversing them. It is also faster to free the
* blocks than to scrub them.
*/
err = dsl_process_async_destroys(dp, tx);
if (err != 0)
return;
if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
return;
/*
* Wait a few txgs after importing to begin scanning so that
* we can get the pool imported quickly.
*/
if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
return;
/*
* zfs_scan_suspend_progress can be set to disable scan progress.
* We don't want to spin the txg_sync thread, so we add a delay
* here to simulate the time spent doing a scan. This is mostly
* useful for testing and debugging.
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
*/
if (!zfs_scan_legacy) {
scn->scn_is_sorted = B_TRUE;
if (scn->scn_last_checkpoint == 0)
scn->scn_last_checkpoint = ddi_get_lbolt();
}
/*
* For sorted scans, determine what kind of work we will be doing
* this txg based on our memory limitations and whether or not we
* need to perform a checkpoint.
*/
if (scn->scn_is_sorted) {
/*
* If we are over our checkpoint interval, set scn_clearing
* so that we can begin checkpointing immediately. The
* checkpoint allows us to save a consistent bookmark
* representing how much data we have scrubbed so far.
* Otherwise, use the memory limit to determine if we should
* scan for metadata or start issuing scrub IOs. We accumulate
* metadata until we hit our hard memory limit at which point
* we issue scrub IOs until we are at our soft memory limit.
*/
if (scn->scn_checkpointing ||
ddi_get_lbolt() - scn->scn_last_checkpoint >
SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
if (!scn->scn_checkpointing)
zfs_dbgmsg("begin scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
} else {
boolean_t should_clear = dsl_scan_should_clear(scn);
if (should_clear && !scn->scn_clearing) {
zfs_dbgmsg("begin scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_TRUE;
} else if (!should_clear && scn->scn_clearing) {
zfs_dbgmsg("finish scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_FALSE;
}
}
} else {
ASSERT0(scn->scn_checkpointing);
ASSERT0(scn->scn_clearing);
}
if (!scn->scn_clearing && scn->scn_done_txg == 0) {
/* Need to scan metadata for more blocks to scrub */
dsl_scan_phys_t *scnp = &scn->scn_phys;
taskqid_t prefetch_tqid;
/*
* Recalculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB). Limits for the issuing
* phase are done per top-level vdev and are handled separately.
*/
scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"ddt bm=%llu/%llu/%llu/%llx",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
} else {
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"bm=%llu/%llu/%llu/%llu",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_bookmark.zb_objset,
(longlong_t)scnp->scn_bookmark.zb_object,
(longlong_t)scnp->scn_bookmark.zb_level,
(longlong_t)scnp->scn_bookmark.zb_blkid);
}
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scn->scn_prefetch_stop = B_FALSE;
prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
dsl_scan_prefetch_thread, scn, TQ_SLEEP);
ASSERT(prefetch_tqid != TASKQID_INVALID);
dsl_pool_config_enter(dp, FTAG);
dsl_scan_visit(scn, tx);
dsl_pool_config_exit(dp, FTAG);
mutex_enter(&dp->dp_spa->spa_scrub_lock);
scn->scn_prefetch_stop = B_TRUE;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&dp->dp_spa->spa_scrub_lock);
taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
"(%llu os's, %llu holes, %llu < mintxg, "
"%llu in ddt, %llu > maxtxg)",
(longlong_t)scn->scn_visited_this_txg,
spa->spa_name,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_objsets_visited_this_txg,
(longlong_t)scn->scn_holes_this_txg,
(longlong_t)scn->scn_lt_min_this_txg,
(longlong_t)scn->scn_ddt_contained_this_txg,
(longlong_t)scn->scn_gt_max_this_txg);
if (!scn->scn_suspending) {
ASSERT0(avl_numnodes(&scn->scn_queue));
scn->scn_done_txg = tx->tx_txg + 1;
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
}
zfs_dbgmsg("scan complete for %s txg %llu",
spa->spa_name,
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
ASSERT(scn->scn_clearing);
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scan_io_queues_run(scn);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
/* calculate and dprintf the current memory usage */
(void) dsl_scan_should_clear(scn);
dsl_scan_update_stats(scn);
zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
"in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
(longlong_t)scn->scn_zios_this_txg,
spa->spa_name,
(longlong_t)scn->scn_segs_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_avg_zio_size_this_txg,
(longlong_t)scn->scn_avg_seg_size_this_txg);
} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
/* Finished with everything. Mark the scrub as complete */
zfs_dbgmsg("scan issuing complete txg %llu for %s",
(longlong_t)tx->tx_txg,
spa->spa_name);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
ASSERT0(scn->scn_queues_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
dsl_scan_sync_state(scn, tx, sync_type);
}
static void
count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
/*
* Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block.
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Update the spa's stats on how many bytes we have issued.
* Sequential scrubs create a zio for each DVA of the bp. Each
* of these will include all DVAs for repair purposes, but the
* zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs.
*/
atomic_add_64(&spa->spa_scan_pass_issued,
all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
if (zab == NULL)
return;
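/*
 * Each block contributes to four histogram buckets: its own
 * (level, type) cell, the per-level total (DMU_OT_TOTAL), the
 * per-type total across all levels (stored at level DN_MAX_LEVELS),
 * and the grand total cell.
 */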
for (int i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_2_of_2_samevdev++;
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal == 1)
zb->zb_ditto_2_of_3_samevdev++;
else if (equal == 3)
zb->zb_ditto_3_of_3_samevdev++;
break;
}
}
}
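/*
 * Low-level insert of an already-constructed scan_io_t into a queue.
 * scn_queues_pending counts queues that currently hold at least one
 * sio, so it is bumped only on the empty -> non-empty transition.
 */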
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
atomic_add_64(&scn->scn_queues_pending, 1);
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
sio_free(sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio);
range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
SIO_GET_ASIZE(sio));
}
/*
* Given all the info we got from our metadata scanning process, we
* construct a scan_io_t and insert it into the scan sorting queue. The
* I/O must already be suitable for us to process. This is controlled
* by dsl_scan_enqueue().
*/
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
ASSERT0(BP_IS_GANG(bp));
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
bp2sio(bp, sio, dva_i);
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
queue->q_last_ext_addr = -1;
scan_io_queue_insert_impl(queue, sio);
}
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
* or immediately executes the I/O.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb)
{
spa_t *spa = dp->dp_spa;
ASSERT(!BP_IS_EMBEDDED(bp));
/*
* Gang blocks are hard to issue sequentially, so we just issue them
* here immediately instead of queuing them.
*/
if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
scan_exec_io(dp, bp, zio_flags, zb, NULL);
return;
}
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
dva_t dva;
vdev_t *vdev;
dva = bp->blk_dva[i];
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
ASSERT(vdev != NULL);
mutex_enter(&vdev->vdev_scan_io_queue_lock);
if (vdev->vdev_scan_io_queue == NULL)
vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
ASSERT(dp->dp_scan != NULL);
scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
i, zio_flags, zb);
mutex_exit(&vdev->vdev_scan_io_queue_lock);
}
}
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
size_t psize = BP_GET_PSIZE(bp);
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
count_block(dp->dp_blkstats, bp);
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) {
count_block_issued(spa, bp, B_TRUE);
return (0);
}
/* Embedded BP's have phys_birth==0, so we reject them above. */
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
zio_flags |= ZIO_FLAG_SCRUB;
needs_io = B_TRUE;
} else {
ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
zio_flags |= ZIO_FLAG_RESILVER;
needs_io = B_FALSE;
}
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d];
/*
* Keep track of how much data we've examined so that
* zpool(8) status can make useful progress reports.
*/
uint64_t asize = DVA_GET_ASIZE(dva);
scn->scn_phys.scn_examined += asize;
spa->spa_scan_pass_exam += asize;
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
needs_io = dsl_scan_need_resilver(spa, dva, psize,
phys_birth);
}
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
count_block_issued(spa, bp, B_TRUE);
}
/* do not relocate this block */
return (0);
}
static void
dsl_scan_scrub_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
dsl_scan_io_queue_t *queue = zio->io_private;
abd_free(zio->io_abd);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
} else {
mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&queue->q_zio_cv);
mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
}
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
}
}
/*
* Given a scanning zio's information, executes the zio. The zio need not
* be sortable; this function simply executes the zio, whatever it is. The
* optional queue argument allows the caller to request per-top-level-vdev
* I/O rate limiting
* instead of the legacy global limiting.
*/
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
zio_t *pio;
if (queue == NULL) {
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
pio = scn->scn_zio_root;
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
ASSERT3U(queue->q_maxinflight_bytes, >, 0);
mutex_enter(q_lock);
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
pio = queue->q_zio;
mutex_exit(q_lock);
}
ASSERT(pio != NULL);
count_block_issued(spa, bp, queue == NULL);
zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
/*
* This is the primary extent sorting algorithm. We balance two parameters:
* 1) how many bytes of I/O are in an extent
* 2) how well the extent is filled with I/O (as a fraction of its total size)
* Since we allow extents to have gaps between their constituent I/Os, it's
* possible to have a fairly large extent that contains the same amount of
* I/O bytes as a much smaller extent that simply packs the I/O more tightly.
* The algorithm sorts based on a score calculated from the extent's size,
* the relative fill volume (in %) and a "fill weight" parameter that controls
* the split between whether we prefer larger extents or more well populated
* extents:
*
* SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
*
* Example:
* 1) assume extsz = 64 MiB
* 2) assume fill = 32 MiB (extent is half full)
* 3) assume fill_weight = 3
* 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
* SCORE = 32M + (50 * 3 * 32M) / 100
* SCORE = 32M + (4800M / 100)
* SCORE = 32M + 48M
* ^ ^
* | +--- final total relative fill-based score
* +--------- final total fill-based score
* SCORE = 80M
*
* As can be seen, at fill_weight=3 the algorithm is slightly biased towards
* extents that are more completely filled (in a 3:2 ratio) versus merely larger ones.
* Note that as an optimization, we replace multiplication and division by
* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
*
* Since we do not care whether one extent is only a few percent better than
* another, we compress the score into 6 bits via binary logarithm (highbit64())
* and store it in the high bits of the offset, which are otherwise unused due
* to ashift. This reduces each q_exts_by_size B-tree element to a single
* 64-bit value that can be compared in one operation. It also makes scrubs
* more sequential and reduces the chance that a minor change to an extent
* moves it within the B-tree.
*/
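/*
 * For example (illustrative numbers): a score of ~96 MiB has
 * highbit64() == 27, so the key's top byte becomes 64 - 27 = 37, while
 * a better extent scoring ~1 GiB gets 64 - 31 = 33 and therefore sorts
 * earlier in the ascending q_exts_by_size B-tree; zfs_btree_first()
 * thus yields the highest-scoring extent. The low bits hold the
 * extent's start (in ashift-sized units), so equal-score extents are
 * issued in roughly LBA order.
 */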
static int
ext_size_compare(const void *x, const void *y)
{
const uint64_t *a = x, *b = y;
return (TREE_CMP(*a, *b));
}
static void
ext_size_create(range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_create(size_tree, ext_size_compare, sizeof (uint64_t));
}
static void
ext_size_destroy(range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
ASSERT0(zfs_btree_numnodes(size_tree));
zfs_btree_destroy(size_tree);
}
static uint64_t
ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
{
(void) rt;
uint64_t size = rsg->rs_end - rsg->rs_start;
uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
fill_weight * rsg->rs_fill) >> 7);
ASSERT3U(rt->rt_shift, >=, 8);
return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
}
static void
ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_add(size_tree, &v);
}
static void
ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_remove(size_tree, &v);
}
static void
ext_size_vacate(range_tree_t *rt, void *arg)
{
zfs_btree_t *size_tree = arg;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
ext_size_create(rt, arg);
}
static const range_tree_ops_t ext_size_ops = {
.rtop_create = ext_size_create,
.rtop_destroy = ext_size_destroy,
.rtop_add = ext_size_add,
.rtop_remove = ext_size_remove,
.rtop_vacate = ext_size_vacate
};
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
*/
static int
sio_addr_compare(const void *x, const void *y)
{
const scan_io_t *a = x, *b = y;
return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}
/* IO queues are created on demand when they are needed. */
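/*
 * Each queue tracks its pending work in two coordinated structures:
 * q_sios_by_addr is an AVL tree of individual scan_io_t's sorted by
 * offset, while q_exts_by_addr is a gap-capable range tree of extents
 * whose ops keep q_exts_by_size (a B-tree of packed score/offset keys)
 * in sync, so the issuing code can pick the best extent cheaply.
 */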
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
q->q_scn = scn;
q->q_vd = vd;
q->q_sio_memused = 0;
q->q_last_ext_addr = -1;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
&q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
return (q);
}
/*
* Destroys a scan queue and all segments and scan_io_t's contained in it.
* No further execution of I/O occurs; anything pending in the queue is
* simply freed without being executed.
*/
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (!avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio);
}
ASSERT0(queue->q_sio_memused);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv);
kmem_free(queue, sizeof (*queue));
}
/*
* Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
* called on behalf of vdev_top_transfer when creating or destroying
* a mirror vdev due to zpool attach/detach.
*/
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
tvd->vdev_scan_io_queue->q_vd = tvd;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
mutex_enter(&tvd->vdev_scan_io_queue_lock);
if (tvd->vdev_scan_io_queue != NULL)
dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = NULL;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
vdev_t *vdev;
kmutex_t *q_lock;
dsl_scan_io_queue_t *queue;
scan_io_t *srch_sio, *sio;
avl_index_t idx;
uint64_t start, size;
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
ASSERT(vdev != NULL);
q_lock = &vdev->vdev_scan_io_queue_lock;
queue = vdev->vdev_scan_io_queue;
mutex_enter(q_lock);
if (queue == NULL) {
mutex_exit(q_lock);
return;
}
srch_sio = sio_alloc(BP_GET_NDVAS(bp));
bp2sio(bp, srch_sio, dva_i);
start = SIO_GET_OFFSET(srch_sio);
size = SIO_GET_ASIZE(srch_sio);
/*
* We can find the zio in two states:
* 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement
* its data volume from the containing range_seg_t and
* resort the q_exts_by_size tree to reflect that the
* range_seg_t has lost some of its 'fill'. We don't shorten
* the range_seg_t - this is usually rare enough not to be
* worth the extra hassle of trying to keep track of precise
* extent boundaries.
* 2) Hot, where the zio is currently in-flight in
* dsl_scan_issue_ios. In this case, we can't simply
* reach in and stop the in-flight zio's, so we instead
* block the caller. Eventually, dsl_scan_issue_ios will
* be done with issuing the zio's it gathered and will
* signal us.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio != NULL) {
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/* count the block as though we issued it */
sio2bp(sio, &tmpbp);
count_block_issued(spa, &tmpbp, B_FALSE);
sio_free(sio);
}
mutex_exit(q_lock);
}
/*
* Callback invoked when a zio_free() zio is executing. This needs to be
* intercepted to prevent the zio from deallocating a particular portion
* of disk space and that space then being reallocated and written to while we
* still have it queued up for processing.
*/
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(scn != NULL);
if (!dsl_scan_is_running(scn))
return;
for (int i = 0; i < BP_GET_NDVAS(bp); i++)
dsl_scan_freed_dva(spa, bp, i);
}
/*
* Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has
* not started, start it. Otherwise, only restart if max txg in DTL range is
* greater than the max txg in the current scan. If the DTL max is less than
* the scan max, then the vdev has not missed any new data since the resilver
* started, so a restart is not needed.
*/
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
uint64_t min, max;
if (!vdev_resilver_needed(vd, &min, &max))
return;
if (!dsl_scan_resilvering(dp)) {
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
return;
}
if (max <= dp->dp_scan->scn_phys.scn_max_txg)
return;
/* restart is needed, check if it can be deferred */
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
vdev_defer_resilver(vd);
else
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
"Min millisecs to scrub per txg");
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
"Min millisecs to obsolete per txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
"Min millisecs to free per txg");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
"Set to prevent scans from progressing");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
"Set to disable scrub I/O");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW,
"Max number of blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj");
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
"Enable block statistics calculation during scrub");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");
diff --git a/sys/contrib/openzfs/module/zfs/fm.c b/sys/contrib/openzfs/module/zfs/fm.c
index e7a7ad583242..bc13b5517c4e 100644
--- a/sys/contrib/openzfs/module/zfs/fm.c
+++ b/sys/contrib/openzfs/module/zfs/fm.c
@@ -1,1374 +1,1374 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Fault Management Architecture (FMA) Resource and Protocol Support
*
* The routines contained herein provide services to support kernel subsystems
* in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
*
* Name-Value Pair Lists
*
* The embodiment of an FMA protocol element (event, fmri or authority) is a
* name-value pair list (nvlist_t). FMA-specific nvlist constructor and
* destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
* to create an nvpair list using custom allocators. Callers may choose to
* allocate either from the kernel memory allocator, or from a preallocated
* buffer, useful in constrained contexts like high-level interrupt routines.
*
* Protocol Event and FMRI Construction
*
* Convenience routines are provided to construct nvlist events according to
* the FMA Event Protocol and Naming Schema specification for ereports and
* FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
*
* ENA Manipulation
*
* Routines to generate ENA formats 0, 1 and 2 are available as well as
* routines to increment formats 1 and 2. Individual fields within the
* ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
* fm_ena_format_get() and fm_ena_gen_get().
*/
#include <sys/types.h>
#include <sys/time.h>
#include <sys/list.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/kstat.h>
#include <sys/zfs_context.h>
#ifdef _KERNEL
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/zfs_ioctl.h>
static int zfs_zevent_len_max = 512;
static int zevent_len_cur = 0;
static int zevent_waiters = 0;
static int zevent_flags = 0;
/* Num events rate limited since the last time zfs_zevent_next() was called */
static uint64_t ratelimit_dropped = 0;
/*
* The EID (Event IDentifier) is used to uniquely tag a zevent when it is
* posted. The posted EIDs are monotonically increasing but not persistent.
* They will be reset to the initial value (1) each time the kernel module is
* loaded.
*/
static uint64_t zevent_eid = 0;
static kmutex_t zevent_lock;
static list_t zevent_list;
static kcondvar_t zevent_cv;
#endif /* _KERNEL */
/*
* Common fault management kstats to record event generation failures
*/
struct erpt_kstat {
kstat_named_t erpt_dropped; /* num erpts dropped on post */
kstat_named_t erpt_set_failed; /* num erpt set failures */
kstat_named_t fmri_set_failed; /* num fmri set failures */
kstat_named_t payload_set_failed; /* num payload set failures */
kstat_named_t erpt_duplicates; /* num duplicate erpts */
};
static struct erpt_kstat erpt_kstat_data = {
{ "erpt-dropped", KSTAT_DATA_UINT64 },
{ "erpt-set-failed", KSTAT_DATA_UINT64 },
{ "fmri-set-failed", KSTAT_DATA_UINT64 },
{ "payload-set-failed", KSTAT_DATA_UINT64 },
{ "erpt-duplicates", KSTAT_DATA_UINT64 }
};
kstat_t *fm_ksp;
#ifdef _KERNEL
static zevent_t *
zfs_zevent_alloc(void)
{
zevent_t *ev;
ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);
list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
offsetof(zfs_zevent_t, ze_node));
list_link_init(&ev->ev_node);
return (ev);
}
static void
zfs_zevent_free(zevent_t *ev)
{
/* Run provided cleanup callback */
ev->ev_cb(ev->ev_nvl, ev->ev_detector);
list_destroy(&ev->ev_ze_list);
kmem_free(ev, sizeof (zevent_t));
}
static void
zfs_zevent_drain(zevent_t *ev)
{
zfs_zevent_t *ze;
ASSERT(MUTEX_HELD(&zevent_lock));
list_remove(&zevent_list, ev);
/* Remove references to this event in all private file data */
while ((ze = list_head(&ev->ev_ze_list)) != NULL) {
list_remove(&ev->ev_ze_list, ze);
ze->ze_zevent = NULL;
ze->ze_dropped++;
}
zfs_zevent_free(ev);
}
void
zfs_zevent_drain_all(int *count)
{
zevent_t *ev;
mutex_enter(&zevent_lock);
while ((ev = list_head(&zevent_list)) != NULL)
zfs_zevent_drain(ev);
*count = zevent_len_cur;
zevent_len_cur = 0;
mutex_exit(&zevent_lock);
}
/*
* New zevents are inserted at the head. If the maximum queue
* length is exceeded a zevent will be drained from the tail.
* As part of this any user space processes which currently have
* a reference to this zevent_t in their private data will have
* this reference set to NULL.
*/
static void
zfs_zevent_insert(zevent_t *ev)
{
ASSERT(MUTEX_HELD(&zevent_lock));
list_insert_head(&zevent_list, ev);
if (zevent_len_cur >= zfs_zevent_len_max)
zfs_zevent_drain(list_tail(&zevent_list));
else
zevent_len_cur++;
}
/*
* Post a zevent. The cb will be called when nvl and detector are no longer
* needed, i.e.:
* - An error happened and a zevent can't be posted. In this case, cb is called
* before zfs_zevent_post() returns.
* - The event is being drained and freed.
*/
int
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
inode_timespec_t tv;
int64_t tv_array[2];
uint64_t eid;
size_t nvl_size = 0;
zevent_t *ev;
int error;
ASSERT(cb != NULL);
gethrestime(&tv);
tv_array[0] = tv.tv_sec;
tv_array[1] = tv.tv_nsec;
error = nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
goto out;
}
eid = atomic_inc_64_nv(&zevent_eid);
error = nvlist_add_uint64(nvl, FM_EREPORT_EID, eid);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
goto out;
}
error = nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
goto out;
}
if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
error = EOVERFLOW;
goto out;
}
ev = zfs_zevent_alloc();
if (ev == NULL) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
error = ENOMEM;
goto out;
}
ev->ev_nvl = nvl;
ev->ev_detector = detector;
ev->ev_cb = cb;
ev->ev_eid = eid;
mutex_enter(&zevent_lock);
zfs_zevent_insert(ev);
cv_broadcast(&zevent_cv);
mutex_exit(&zevent_lock);
out:
if (error)
cb(nvl, detector);
return (error);
}
void
zfs_zevent_track_duplicate(void)
{
atomic_inc_64(&erpt_kstat_data.erpt_duplicates.value.ui64);
}
static int
zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
{
*ze = zfsdev_get_state(minor, ZST_ZEVENT);
if (*ze == NULL)
return (SET_ERROR(EBADF));
return (0);
}
zfs_file_t *
zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
{
zfs_file_t *fp = zfs_file_get(fd);
if (fp == NULL)
return (NULL);
int error = zfsdev_getminor(fp, minorp);
if (error == 0)
error = zfs_zevent_minor_to_state(*minorp, ze);
if (error) {
zfs_zevent_fd_rele(fp);
fp = NULL;
}
return (fp);
}
void
zfs_zevent_fd_rele(zfs_file_t *fp)
{
zfs_file_put(fp);
}
/*
* Get the next zevent in the stream and place a copy in 'event'. This
* may fail with ENOMEM if the encoded nvlist size exceeds the passed
* 'event_size'. In this case the stream pointer is not advanced and
* 'event_size' is set to the minimum required buffer size.
*/
int
zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
uint64_t *dropped)
{
zevent_t *ev;
size_t size;
int error = 0;
mutex_enter(&zevent_lock);
if (ze->ze_zevent == NULL) {
/* A new stream starts at the beginning/tail */
ev = list_tail(&zevent_list);
if (ev == NULL) {
error = ENOENT;
goto out;
}
} else {
/*
* An existing stream continues with the next element; remove
* ourselves from the wait queue for the previous element.
*/
ev = list_prev(&zevent_list, ze->ze_zevent);
if (ev == NULL) {
error = ENOENT;
goto out;
}
}
VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
if (size > *event_size) {
*event_size = size;
error = ENOMEM;
goto out;
}
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
(void) nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
*dropped = ze->ze_dropped;
#ifdef _KERNEL
/* Include events dropped due to rate limiting */
*dropped += atomic_swap_64(&ratelimit_dropped, 0);
#endif
ze->ze_dropped = 0;
out:
mutex_exit(&zevent_lock);
return (error);
}
/*
* Wait in an interruptible state for any new events.
*/
int
zfs_zevent_wait(zfs_zevent_t *ze)
{
int error = EAGAIN;
mutex_enter(&zevent_lock);
zevent_waiters++;
while (error == EAGAIN) {
if (zevent_flags & ZEVENT_SHUTDOWN) {
error = SET_ERROR(ESHUTDOWN);
break;
}
error = cv_wait_sig(&zevent_cv, &zevent_lock);
if (signal_pending(current)) {
error = SET_ERROR(EINTR);
break;
} else if (!list_is_empty(&zevent_list)) {
error = 0;
continue;
} else {
error = EAGAIN;
}
}
zevent_waiters--;
mutex_exit(&zevent_lock);
return (error);
}
/*
* The caller may seek to a specific EID by passing that EID. If the EID
* is still available in the posted list of events the cursor is positioned
* there. Otherwise ENOENT is returned and the cursor is not moved.
*
* There are two reserved EIDs which may be passed and will never fail.
* ZEVENT_SEEK_START positions the cursor at the start of the list, and
* ZEVENT_SEEK_END positions the cursor at the end of the list.
*/
int
zfs_zevent_seek(zfs_zevent_t *ze, uint64_t eid)
{
zevent_t *ev;
int error = 0;
mutex_enter(&zevent_lock);
if (eid == ZEVENT_SEEK_START) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = NULL;
goto out;
}
if (eid == ZEVENT_SEEK_END) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ev = list_head(&zevent_list);
if (ev) {
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
} else {
ze->ze_zevent = NULL;
}
goto out;
}
for (ev = list_tail(&zevent_list); ev != NULL;
ev = list_prev(&zevent_list, ev)) {
if (ev->ev_eid == eid) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
break;
}
}
if (ev == NULL)
error = ENOENT;
out:
mutex_exit(&zevent_lock);
return (error);
}
void
zfs_zevent_init(zfs_zevent_t **zep)
{
zfs_zevent_t *ze;
ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
list_link_init(&ze->ze_node);
}
void
zfs_zevent_destroy(zfs_zevent_t *ze)
{
mutex_enter(&zevent_lock);
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
mutex_exit(&zevent_lock);
kmem_free(ze, sizeof (zfs_zevent_t));
}
#endif /* _KERNEL */
/*
* Wrappers for FM nvlist allocators
*/
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
(void) nva;
return (kmem_alloc(size, KM_SLEEP));
}
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
(void) nva;
kmem_free(buf, size);
}
static const nv_alloc_ops_t fm_mem_alloc_ops = {
.nv_ao_init = NULL,
.nv_ao_fini = NULL,
.nv_ao_alloc = i_fm_alloc,
.nv_ao_free = i_fm_free,
.nv_ao_reset = NULL
};
/*
* Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
* to the newly allocated nv_alloc_t structure is returned upon success or NULL
* is returned to indicate that the nv_alloc structure could not be created.
*/
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
kmem_free(nvhdl, sizeof (nv_alloc_t));
return (NULL);
}
return (nvhdl);
}
/*
* Destroy a previously allocated nv_alloc structure. The fixed buffer
* associated with nva must be freed by the caller.
*/
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
nv_alloc_fini(nva);
kmem_free(nva, sizeof (nv_alloc_t));
}
/*
* Create a new nv list. A pointer to a new nv list structure is returned
* upon success or NULL is returned to indicate that the structure could
* not be created. The newly created nv list is managed by the
* operations installed in nva. If nva is NULL, the default FMA nva
* operations are installed and used.
*
* When called from the kernel and nva == NULL, this function must be called
* from passive kernel context with no locks held that can prevent a
* sleeping memory allocation from occurring. Otherwise, this function may
* be called from other kernel contexts as long as a valid nva created via
* fm_nva_xcreate() is supplied.
*/
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
int hdl_alloced = 0;
nvlist_t *nvl;
nv_alloc_t *nvhdl;
if (nva == NULL) {
nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
kmem_free(nvhdl, sizeof (nv_alloc_t));
return (NULL);
}
hdl_alloced = 1;
} else {
nvhdl = nva;
}
if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
if (hdl_alloced) {
nv_alloc_fini(nvhdl);
kmem_free(nvhdl, sizeof (nv_alloc_t));
}
return (NULL);
}
return (nvl);
}
/*
* Destroy a previously allocated nvlist structure. flag indicates whether
* or not the associated nva structure should be freed (FM_NVA_FREE) or
* retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
* it to be re-used for future nvlist creation operations.
*/
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
nvlist_free(nvl);
if (nva != NULL) {
if (flag == FM_NVA_FREE)
fm_nva_xdestroy(nva);
}
}
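/*
 * Add a NULL-terminated list of (name, DATA_TYPE_*, value) payload
 * members to an nvlist. Array types take an element count before the
 * data pointer, and types narrower than int are passed as promoted
 * ints per the usual varargs rules. Processing stops at the first
 * NULL name or the first nvlist_add_*() failure, whose error is
 * returned.
 */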
int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
int nelem, ret = 0;
data_type_t type;
while (ret == 0 && name != NULL) {
type = va_arg(ap, data_type_t);
switch (type) {
case DATA_TYPE_BYTE:
ret = nvlist_add_byte(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_BYTE_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_byte_array(payload, name,
va_arg(ap, uchar_t *), nelem);
break;
case DATA_TYPE_BOOLEAN_VALUE:
ret = nvlist_add_boolean_value(payload, name,
va_arg(ap, boolean_t));
break;
case DATA_TYPE_BOOLEAN_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_boolean_array(payload, name,
va_arg(ap, boolean_t *), nelem);
break;
case DATA_TYPE_INT8:
ret = nvlist_add_int8(payload, name,
va_arg(ap, int));
break;
case DATA_TYPE_INT8_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int8_array(payload, name,
va_arg(ap, int8_t *), nelem);
break;
case DATA_TYPE_UINT8:
ret = nvlist_add_uint8(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_UINT8_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint8_array(payload, name,
va_arg(ap, uint8_t *), nelem);
break;
case DATA_TYPE_INT16:
ret = nvlist_add_int16(payload, name,
va_arg(ap, int));
break;
case DATA_TYPE_INT16_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int16_array(payload, name,
va_arg(ap, int16_t *), nelem);
break;
case DATA_TYPE_UINT16:
ret = nvlist_add_uint16(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_UINT16_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint16_array(payload, name,
va_arg(ap, uint16_t *), nelem);
break;
case DATA_TYPE_INT32:
ret = nvlist_add_int32(payload, name,
va_arg(ap, int32_t));
break;
case DATA_TYPE_INT32_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int32_array(payload, name,
va_arg(ap, int32_t *), nelem);
break;
case DATA_TYPE_UINT32:
ret = nvlist_add_uint32(payload, name,
va_arg(ap, uint32_t));
break;
case DATA_TYPE_UINT32_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint32_array(payload, name,
va_arg(ap, uint32_t *), nelem);
break;
case DATA_TYPE_INT64:
ret = nvlist_add_int64(payload, name,
va_arg(ap, int64_t));
break;
case DATA_TYPE_INT64_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int64_array(payload, name,
va_arg(ap, int64_t *), nelem);
break;
case DATA_TYPE_UINT64:
ret = nvlist_add_uint64(payload, name,
va_arg(ap, uint64_t));
break;
case DATA_TYPE_UINT64_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint64_array(payload, name,
va_arg(ap, uint64_t *), nelem);
break;
case DATA_TYPE_STRING:
ret = nvlist_add_string(payload, name,
va_arg(ap, char *));
break;
case DATA_TYPE_STRING_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_string_array(payload, name,
va_arg(ap, const char **), nelem);
break;
case DATA_TYPE_NVLIST:
ret = nvlist_add_nvlist(payload, name,
va_arg(ap, nvlist_t *));
break;
case DATA_TYPE_NVLIST_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_nvlist_array(payload, name,
va_arg(ap, const nvlist_t **), nelem);
break;
default:
ret = EINVAL;
}
name = va_arg(ap, char *);
}
return (ret);
}
void
fm_payload_set(nvlist_t *payload, ...)
{
int ret;
const char *name;
va_list ap;
va_start(ap, payload);
name = va_arg(ap, char *);
ret = i_fm_payload_set(payload, name, ap);
va_end(ap);
if (ret)
atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
/*
* Set-up and validate the members of an ereport event according to:
*
* Member name Type Value
* ====================================================
* class string ereport
* version uint8_t 0
* ena uint64_t <ena>
* detector nvlist_t <detector>
* ereport-payload nvlist_t <var args>
*
* We don't actually add a 'version' member to the payload. Really,
* the version quoted to us by our caller is that of the category 1
* "ereport" event class (and we require FM_EREPORT_VERS0) but
* the payload version of the actual leaf class event under construction
* may be something else. Callers should supply a version in the varargs,
* or (better) we could take two version arguments - one for the
* ereport category 1 classification (expect FM_EREPORT_VERS0) and one
* for the leaf class.
*/
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
uint64_t ena, const nvlist_t *detector, ...)
{
char ereport_class[FM_MAX_CLASS];
const char *name;
va_list ap;
int ret;
if (version != FM_EREPORT_VERS0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
FM_EREPORT_CLASS, erpt_class);
if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
(nvlist_t *)detector) != 0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
va_start(ap, detector);
name = va_arg(ap, const char *);
ret = i_fm_payload_set(ereport, name, ap);
va_end(ap);
if (ret)
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
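/*
 * Illustrative sketch, not part of the original source: the fixed arguments
 * are followed by the same NULL-terminated payload triples consumed by
 * i_fm_payload_set(), and the supplied class is prefixed with
 * FM_EREPORT_CLASS ("ereport"). The leaf class and member name below are
 * hypothetical:
 *
 *     fm_ereport_set(ereport, FM_EREPORT_VERS0, "fs.zfs.io",
 *         ena, detector,
 *         "pool_guid", DATA_TYPE_UINT64, spa_guid(spa),
 *         NULL);
 */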
/*
* Set-up and validate the members of an hc fmri according to:
*
* Member name Type Value
* ===================================================
* version uint8_t 0
* auth nvlist_t <auth>
* hc-name string <name>
* hc-id string <id>
*
* Note that auth and hc-id are optional members.
*/
#define HC_MAXPAIRS 20
#define HC_MAXNAMELEN 50
static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
if (version != FM_HC_SCHEME_VERSION) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
return (1);
}
void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
nvlist_t *snvl, int npairs, ...)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
nvlist_t *pairs[HC_MAXPAIRS];
va_list ap;
int i;
if (!fm_fmri_hc_set_common(fmri, version, auth))
return;
npairs = MIN(npairs, HC_MAXPAIRS);
va_start(ap, npairs);
for (i = 0; i < npairs; i++) {
const char *name = va_arg(ap, const char *);
uint32_t id = va_arg(ap, uint32_t);
char idstr[11];
(void) snprintf(idstr, sizeof (idstr), "%u", id);
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
va_end(ap);
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
(const nvlist_t **)pairs, npairs) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
for (i = 0; i < npairs; i++)
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
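/*
 * Illustrative sketch, not part of the original source: each of the npairs
 * variable arguments is a (const char *name, uint32_t id) pair that becomes
 * one { hc-name, hc-id } element of the hc-list. The pair names below are
 * hypothetical:
 *
 *     fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 2,
 *         "chassis", 0, "disk", 3);
 *
 * would yield hc-list = [ { hc-name="chassis", hc-id="0" },
 * { hc-name="disk", hc-id="3" } ].
 */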
void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
nvlist_t *pairs[HC_MAXPAIRS];
nvlist_t **hcl;
uint_t n;
int i, j;
va_list ap;
char *hcname, *hcid;
if (!fm_fmri_hc_set_common(fmri, version, auth))
return;
/*
* copy the bboard nvpairs to the pairs array
*/
if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
!= 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < n; i++) {
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
&hcname) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
for (j = 0; j <= i; j++) {
if (pairs[j] != NULL)
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
/*
* create the pairs from passed in pairs
*/
npairs = MIN(npairs, HC_MAXPAIRS);
va_start(ap, npairs);
for (i = n; i < npairs + n; i++) {
const char *name = va_arg(ap, const char *);
uint32_t id = va_arg(ap, uint32_t);
char idstr[11];
(void) snprintf(idstr, sizeof (idstr), "%u", id);
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
for (j = 0; j <= i; j++) {
if (pairs[j] != NULL)
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
va_end(ap);
/*
* Create the fmri hc list
*/
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
(const nvlist_t **)pairs, npairs + n) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < npairs + n; i++) {
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
}
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
}
/*
* Set-up and validate the members of a dev fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth>
* devpath string <devpath>
* [devid] string <devid>
* [target-port-l0id] string <target-port-lun0-id>
*
* Note that auth and devid are optional members.
*/
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
const char *devpath, const char *devid, const char *tpl0)
{
int err = 0;
if (version != DEV_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
if (auth != NULL) {
err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
(nvlist_t *)auth);
}
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
if (devid != NULL)
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
if (tpl0 != NULL)
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
if (err)
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
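/*
 * Illustrative sketch, not part of the original source: devid and tpl0 may
 * be NULL, in which case the optional members are simply omitted. The
 * device path and devid strings below are hypothetical:
 *
 *     fm_fmri_dev_set(fmri, DEV_SCHEME_VERSION0, NULL,
 *         "/dev/da0", "id1,sd@n5000c500a1b2c3d4", NULL);
 */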
/*
* Set-up and validate the members of a cpu fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth>
* cpuid uint32_t <cpu_id>
* cpumask uint8_t <cpu_mask>
* serial uint64_t <serial_id>
*
* Note that auth, cpumask, serial are optional members.
*
*/
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
if (version < CPU_SCHEME_VERSION1) {
atomic_inc_64(failedp);
return;
}
if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
atomic_inc_64(failedp);
return;
}
if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
FM_FMRI_SCHEME_CPU) != 0) {
atomic_inc_64(failedp);
return;
}
if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0)
atomic_inc_64(failedp);
if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
atomic_inc_64(failedp);
if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
*cpu_maskp) != 0)
atomic_inc_64(failedp);
if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
atomic_inc_64(failedp);
}
/*
* Set-up and validate the members of a mem fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth> [optional]
* unum string <unum>
* serial string <serial> [optional*]
* offset uint64_t <offset> [optional]
*
* * serial is required if offset is present
*/
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
const char *unum, const char *serial, uint64_t offset)
{
if (version != MEM_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (!serial && (offset != (uint64_t)-1)) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (auth != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (serial != NULL) {
if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
(const char **)&serial, 1) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
FM_FMRI_MEM_OFFSET, offset) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
uint64_t vdev_guid)
{
if (version != ZFS_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (vdev_guid != 0) {
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
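/*
 * Illustrative sketch, not part of the original source: a vdev_guid of 0
 * omits the vdev member, producing a pool-level FMRI:
 *
 *     fm_fmri_zfs_set(fmri, ZFS_SCHEME_VERSION0, spa_guid(spa), 0);
 */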
uint64_t
fm_ena_increment(uint64_t ena)
{
uint64_t new_ena;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
break;
case FM_ENA_FMT2:
new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
break;
default:
new_ena = 0;
}
return (new_ena);
}
uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
uint64_t ena = 0;
switch (format) {
case FM_ENA_FMT1:
if (timestamp) {
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((cpuid << ENA_FMT1_CPUID_SHFT) &
ENA_FMT1_CPUID_MASK) |
((timestamp << ENA_FMT1_TIME_SHFT) &
ENA_FMT1_TIME_MASK));
} else {
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((cpuid << ENA_FMT1_CPUID_SHFT) &
ENA_FMT1_CPUID_MASK) |
((gethrtime() << ENA_FMT1_TIME_SHFT) &
ENA_FMT1_TIME_MASK));
}
break;
case FM_ENA_FMT2:
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
break;
default:
break;
}
return (ena);
}
uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
uint64_t ena;
kpreempt_disable();
ena = fm_ena_generate_cpu(timestamp, getcpuid(), format);
kpreempt_enable();
return (ena);
}
uint64_t
fm_ena_generation_get(uint64_t ena)
{
uint64_t gen;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
break;
case FM_ENA_FMT2:
gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
break;
default:
gen = 0;
break;
}
return (gen);
}
uchar_t
fm_ena_format_get(uint64_t ena)
{
return (ENA_FORMAT(ena));
}
uint64_t
fm_ena_id_get(uint64_t ena)
{
uint64_t id;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
break;
case FM_ENA_FMT2:
id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
break;
default:
id = 0;
}
return (id);
}
uint64_t
fm_ena_time_get(uint64_t ena)
{
uint64_t time;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
break;
case FM_ENA_FMT2:
time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
break;
default:
time = 0;
}
return (time);
}
#ifdef _KERNEL
/*
* Helper function to increment ereport dropped count. Used by the event
* rate limiting code to give feedback to the user about how many events were
* rate limited by including them in the 'dropped' count.
*/
void
fm_erpt_dropped_increment(void)
{
atomic_inc_64(&ratelimit_dropped);
}
void
fm_init(void)
{
zevent_len_cur = 0;
zevent_flags = 0;
/* Initialize zevent allocation and generation kstats */
fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (fm_ksp != NULL) {
fm_ksp->ks_data = &erpt_kstat_data;
kstat_install(fm_ksp);
} else {
cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
}
mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zevent_list, sizeof (zevent_t),
offsetof(zevent_t, ev_node));
cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);
zfs_ereport_init();
}
void
fm_fini(void)
{
int count;
zfs_ereport_fini();
zfs_zevent_drain_all(&count);
mutex_enter(&zevent_lock);
cv_broadcast(&zevent_cv);
zevent_flags |= ZEVENT_SHUTDOWN;
while (zevent_waiters > 0) {
mutex_exit(&zevent_lock);
- schedule();
+ kpreempt(KPREEMPT_SYNC);
mutex_enter(&zevent_lock);
}
mutex_exit(&zevent_lock);
cv_destroy(&zevent_cv);
list_destroy(&zevent_list);
mutex_destroy(&zevent_lock);
if (fm_ksp != NULL) {
kstat_delete(fm_ksp);
fm_ksp = NULL;
}
}
#endif /* _KERNEL */
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, INT, ZMOD_RW,
"Max event queue length");
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index b2b59af42947..eeec3b6be9ca 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,10037 +1,10036 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
-#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
* but with number of taskqs also scaling with number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
static const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
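/*
 * Illustrative reading of the table above (not part of the original source):
 * ZTI_N(8) for READ/ISSUE creates one taskq with 8 threads, ZTI_P(n, q)
 * would create q taskqs of n threads each, ZTI_SCALE sizes both the thread
 * and taskq counts from the CPU count, and ZTI_NULL creates no taskq for
 * that type/context combination.
 */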
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type,
const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
static uint_t zio_taskq_batch_tpq; /* threads per taskq */
static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
static const uint_t zio_taskq_basedc = 80; /* base duty cycle */
static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
* This is used by zdb for spacemaps verification.
*/
boolean_t spa_mode_readable_spacemaps = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
static int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
unsigned long zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
* might be incomplete or out-dated.
*
* We are more tolerant for pools opened from a cachefile since we could have
* an out-dated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
static const boolean_t zfs_pause_spa_sync = B_FALSE;
/*
* Variables to indicate the livelist condense zthr func should wait at certain
* points for the livelist to be removed - used to test condense/destroy races
*/
static int zfs_livelist_condense_zthr_pause = 0;
static int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
static int zfs_livelist_condense_sync_cancel = 0;
static int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
static int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
* when opening pools from before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
spa_prop_add_list(*nvp, prop, strval, 0, src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = SET_ERROR(EINVAL);
break;
}
/*
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but we want
* fewer for better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Limit each taskq within 100% to not trigger assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
value = (zio_taskq_batch_pct + count / 2) / count;
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
(void) zio_taskq_basedc;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
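/*
 * Worked example for the ZTI_MODE_SCALE sizing above (not part of the
 * original source), assuming the default zio_taskq_batch_pct of 80 and
 * zio_taskq_batch_tpq of 0 on a 32-CPU system:
 *
 *     cpus  = MAX(1, 32 * 80 / 100) = 25
 *     count = 1 + 25 / 6            = 5    (5 * 5 <= 25, so kept)
 *     value = (80 + 5 / 2) / 5      = 16   (percent of CPUs per taskq)
 *
 * which matches the 32-CPU row of the table in the ZTI_MODE_SCALE comment:
 * 5 taskqs at 16%, about 5 threads each, roughly 25 threads total.
 */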
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself. In that case we choose which taskq at random by using
* the low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
spa->spa_normal_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_embedded_log_class =
metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_special_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_dedup_class = metaslab_class_create(spa, &zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
(void) spa_create_process;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_healed,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_activate_os(spa);
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
avl_destroy(&spa->spa_errlist_healed);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
spa_deactivate_os(spa);
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that will set the flag that will instruct
* spa_sync to attempt to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_head(&spa->spa_log_summary)) {
VERIFY0(e->lse_mscount);
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If we have set the spa_final_txg, we have already performed the
* tasks below in spa_export_common(). We should not redo it here since
* we delay the final TXGs beyond what spa_final_txg is set at.
*/
if (spa->spa_final_txg == UINT64_MAX) {
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
vdev_t *root_vdev = spa->spa_root_vdev;
vdev_initialize_stop_all(root_vdev,
VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
if (spa->spa_root_vdev != NULL) {
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
if (vc->vdev_mg != NULL)
taskq_wait(vc->vdev_mg->mg_taskq);
}
}
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
if (spa->spa_spares.sav_vdevs) {
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
if (spa->spa_l2cache.sav_vdevs) {
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
if (spa->spa_spares.sav_vdevs)
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
spa->spa_spares.sav_count);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device or if the header
* of the device is invalid we issue an async
* TRIM command for the whole device which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, sav->sav_count);
out:
/*
* Purge vdevs that were dropped
*/
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
if (oldvdevs)
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
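/*
 * Read a packed nvlist from the given MOS object, whose bonus buffer
 * holds the packed size, and unpack it into 'value'.
 */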
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
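/*
 * If any log devices failed to open, either record them in spa_load_info
 * so the user can act on them (normal import) or, when
 * ZFS_IMPORT_MISSING_LOG was requested, arrange for the ZIL to be dropped.
 */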
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
nv = fnvlist_alloc();
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device as missing only if it failed
* to open (i.e. an offline or faulted device is not
* considered missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
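/*
 * Record the largest block birth txg seen among successfully claimed
 * blocks.
 */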
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
boolean_t sle_verify_data;
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
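/*
 * Completion callback for the speculative reads issued by
 * spa_load_verify_cb(): classify any error as a metadata or data error
 * and release the in-flight byte accounting.
 */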
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* The maximum number of inflight bytes is the ARC size shifted right by
* spa_load_verify_shift; by default, 1/16th of the ARC.
*/
static int spa_load_verify_shift = 4;
static int spa_load_verify_metadata = B_TRUE;
static int spa_load_verify_data = B_TRUE;
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zio_t *rio = arg;
spa_load_error_t *sle = rio->io_private;
(void) zilog, (void) dnp;
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
/*
* Sanity check the block pointer in order to detect obvious damage
* before using the contents in subsequent checks or in zio_read().
* When damaged consider it to be a metadata error since we cannot
* trust the BP_GET_TYPE and BP_GET_LEVEL values.
*/
if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
atomic_inc_64(&sle->sle_meta_count);
return (0);
}
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
if (!BP_IS_METADATA(bp) &&
(!spa_load_verify_data || !sle->sle_verify_data))
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) dp, (void) arg;
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
policy.zlp_maxmeta == UINT64_MAX)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
/*
* Verify data only if we are rewinding or an error limit was set.
* Otherwise nothing but dbgmsg cares about it, so don't waste the time.
*/
sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
(policy.zlp_maxdata < UINT64_MAX);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
spa->spa_load_txg_ts);
fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
loss);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
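/*
 * Mark the vdev unopenable with the given aux state and return the error.
 */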
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
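/*
 * Look up the object number of the first livelist recorded in the
 * deleted-clones ZAP.
 */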
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t za;
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, &za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za.za_first_integer;
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load in the value for the livelist to be removed and open it. Then,
* load its first sublist and determine which block pointers should actually
* be freed. Then, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
dsl_deadlist_open(ll, mos, ll_obj);
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
minclsyspri);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
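/*
 * Callback used while condensing: blkptrs that were appended to a sublist
 * after the condense started are sorted into separate ALLOC and FREE
 * bplists so they can be re-inserted into the livelist.
 */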
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
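/*
 * Open-context half of livelist condensing: match FREEs against ALLOCs in
 * the two entries being condensed, then dispatch
 * spa_livelist_condense_sync() as a synctask to write out the result.
 */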
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both stores the size (number of
* blockpointers) and iterates over them while the bpobj's lock is held,
* so the sizes returned to us are consistent with what was actually
* processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing cannot continue: either it was externally stopped or
* we were unable to assign to a tx because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa, minclsyspri);
}
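/*
 * Start the auxiliary zthr threads used by a writeable pool: indirect
 * condensing, livelist destroy, livelist condense and checkpoint discard.
 */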
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa, minclsyspri);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
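/*
 * Load the pool from the config in spa->spa_config, tracking import
 * progress and posting an ereport if the load fails.
 */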
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
const char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#else
#define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check - This is used by zdb which
* is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host which last imported it. Since the hostid from
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Nanoseconds the activity check must watch for changes on-disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_delay += import_delay * random_in_range(250) / 1000;
import_expire = gethrtime() + import_delay;
while (gethrtime() < import_expire) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from configuration read from disk store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
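/*
 * Verify that the hostid recorded in the MOS config matches this host;
 * if the pool was last accessed by another system, log the details and
 * return EBADF.
 */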
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
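/*
 * Parse the provided config: check the pool guid, guard against importing
 * an already-imported pool, create the "Godfather" zios and build the
 * (as yet untrusted) vdev tree.
 */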
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
char *comment;
char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case that we allow an already imported pool to be
* imported again, is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to be still considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
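/*
 * Select the best uberblock from the labels, run the MMP activity check
 * when required, and make sure every feature listed in features_for_read
 * is supported before proceeding.
 */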
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* For pools which have the multihost property on determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error = spa_activity_check(spa, ub, spa->spa_config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
spa->spa_label_features = fnvlist_dup(features);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
unsup_feat = fnvlist_alloc();
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
fnvlist_add_string(unsup_feat,
nvpair_name(nvp), "");
}
}
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
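/*
 * Open the DSL pool (and with it the MOS) rooted at the uberblock
 * selected earlier.
 */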
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
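/*
 * Load the trusted copy of the config from the MOS, rebuild the vdev tree
 * from it (copying device paths over from the untrusted config), and
 * re-open and re-validate the vdevs.
 */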
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, the pool owner wasn't verified yet, so do
* the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible method that has a
* best effort policy.
*/
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we have the config from the MOS, we should be more strict
* in checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concrete vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
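
The refcount-cache loop above treats ENOTSUP from disk as "feature not present" and stores a disabled sentinel, so later feature queries never have to touch the MOS. The following standalone sketch shows that pattern in isolation; the fake on-disk table and get_refcount_from_disk() helper are hypothetical stand-ins for feature_get_refcount_from_disk(), not part of spa.c.

/*
 * Standalone sketch of the feature-refcount caching pattern above: a
 * read that fails with ENOTSUP is recorded as a "disabled" sentinel,
 * any other error aborts the load. The on-disk table is illustrative.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define	NFEATURES		4
#define	FEATURE_DISABLED	UINT64_MAX

/* Pretend on-disk state: a refcount, or a negative errno. */
static const int64_t disk_state[NFEATURES] = { 3, 0, -ENOTSUP, -EIO };

static int
get_refcount_from_disk(int feat, uint64_t *refcount)
{
	if (disk_state[feat] < 0)
		return ((int)-disk_state[feat]);
	*refcount = (uint64_t)disk_state[feat];
	return (0);
}

int
main(void)
{
	uint64_t cache[NFEATURES];

	for (int i = 0; i < NFEATURES; i++) {
		uint64_t refcount;
		int error = get_refcount_from_disk(i, &refcount);

		if (error == 0) {
			cache[i] = refcount;
		} else if (error == ENOTSUP) {
			cache[i] = FEATURE_DISABLED;	/* not on disk */
		} else {
			(void) fprintf(stderr,
			    "feature %d: error %d\n", i, error);
			return (1);
		}
		(void) printf("feature %d cached as %ju\n",
		    i, (uintmax_t)cache[i]);
	}
	return (0);
}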
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early / out of sync context. See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
nvlist_free(mos_config);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
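
When no checksum salt is stored in the MOS (the ENOENT case near the top of spa_ld_get_props() above), a fresh random salt is generated for subsequent use. Below is a userland sketch of that fallback, assuming a BSD libc with arc4random_buf() in place of the kernel's random_get_pseudo_bytes(); the 32-byte size is an assumption meant to match the zcs_bytes buffer used above.

/*
 * Illustrative sketch: generate a fresh salt when none is stored.
 * arc4random_buf() stands in for random_get_pseudo_bytes().
 */
#include <stdio.h>
#include <stdlib.h>

#define	SALT_LEN	32	/* assumption: size of zcs_bytes */

int
main(void)
{
	unsigned char salt[SALT_LEN];

	arc4random_buf(salt, sizeof (salt));	/* fresh random salt */
	(void) printf("generated %zu salt bytes, first byte 0x%02x\n",
	    sizeof (salt), salt[0]);
	return (0);
}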
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it to the original value before
* returning as we might be calling spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases where we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening the
* pool read-only. Specifically, when verifying the checkpointed
* state with zdb, or when importing the checkpointed state to get
* a "preview" of its contents.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
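
The label-sync loop above selects at most SPA_SYNC_MIN_VDEVS eligible top-level vdevs, starting at a random child index and wrapping around while skipping log and non-concrete vdevs. The standalone sketch below shows that selection pattern on its own, assuming a BSD libc for arc4random_uniform(); the eligibility table and MAX_PICKS value are illustrative only.

/*
 * Illustrative sketch of round-robin candidate selection with a random
 * starting point: wrap around the children, skip ineligible entries,
 * and stop once enough candidates are found or the first pick recurs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define	MAX_PICKS	3

int
main(void)
{
	/* Pretend eligibility of 8 top-level vdevs (0 = skip). */
	const int eligible[] = { 1, 0, 1, 1, 0, 1, 1, 1 };
	const int children = (int)(sizeof (eligible) / sizeof (eligible[0]));
	int picks[MAX_PICKS];
	int npicks = 0;

	int c0 = (int)arc4random_uniform((uint32_t)children);

	for (int c = 0; c < children; c++) {
		int idx = (c0 + c) % children;

		/* Stop when revisiting the first pick. */
		if (npicks > 0 && picks[0] == idx)
			break;
		if (!eligible[idx])
			continue;
		picks[npicks++] = idx;
		if (npicks == MAX_PICKS)
			break;
	}

	for (int i = 0; i < npicks; i++)
		(void) printf("picked vdev %d\n", picks[i]);
	return (0);
}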
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint, update the config cache
* anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
return (error);
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
return (error);
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
error = spa_ld_load_special_directories(spa);
if (error != 0)
return (error);
/*
* Retrieve pool properties from the MOS.
*/
error = spa_ld_get_props(spa);
if (error != 0)
return (error);
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
return (error);
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
return (error);
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
return (error);
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Traverse the last txgs to make sure the pool was left off in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
* auxiliary threads above (from which the livelist
* deletion zthr is part of).
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
return (0);
}
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails this function will try loading prior txg's. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
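
For a sense of scale of the rewind floor computed above: TXG_DEFER_SIZE is assumed here to be 2 (its current OpenZFS value), so with a last-synced txg of 1000 a normal SPA_LOAD_RECOVER import will not consider uberblocks older than txg 998, while ZPOOL_EXTREME_REWIND lets the retry loop continue back toward the initial txg. A tiny worked example, not part of spa.c:

/*
 * Worked example of the rewind floor. TXG_DEFER_SIZE is assumed to be
 * 2; the txg value is purely illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define	TXG_DEFER_SIZE	2	/* assumption: matches current OpenZFS */

int
main(void)
{
	uint64_t last_ubsync_txg = 1000;	/* illustrative */
	uint64_t safe_rewind_txg = last_ubsync_txg - TXG_DEFER_SIZE;

	(void) printf("a normal rewind will not consider uberblocks "
	    "older than txg %ju\n", (uintmax_t)safe_rewind_txg);
	return (0);
}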
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics at
* the same time we open the pool, without having to keep the spa_t around in
* some ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, const void *tag,
nvlist_t *nvpolicy, nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
* If vdev_validate() returns failure (indicated by
* EBADF), it means that one of the vdevs indicates
* that the pool has been exported or destroyed. If
* this is the case, the config cache is out of sync and
* we should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
*config = fnvlist_dup(spa->spa_config);
fnvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER) {
fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
int
spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
nvlist_t *policy, nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, const void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
/*
* Lookup the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spares device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
if (nspares != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t * const *)spares, nspares);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares));
/*
* Go through and find any spares which have since been
* repurposed as an active spare. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
guid = fnvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID);
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
VERIFY0(nvlist_lookup_uint64_array(spares[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs,
&vsc));
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
if (nl2cache != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, nl2cache);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache));
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
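
spa_add_spares() and spa_add_l2cache() above attach auxiliary-device arrays to the config's vdev tree so userland consumers can read them back. The sketch below shows the consuming side with libnvpair, assuming the OpenZFS flavour of the library whose fnvlist_add_nvlist_array() takes a const nvlist_t * const * array (the same cast the code above uses); the "spares" name and guid value are illustrative only.

/*
 * Illustrative sketch: build a one-element auxiliary-device array and
 * read it back with nvlist_lookup_nvlist_array(), as a config consumer
 * would. Not a real pool config.
 */
#include <stdio.h>
#include <inttypes.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *spare = fnvlist_alloc();
	fnvlist_add_uint64(spare, "guid", 0xdeadbeefULL);

	nvlist_t *nvroot = fnvlist_alloc();
	nvlist_t *spares[] = { spare };
	fnvlist_add_nvlist_array(nvroot, "spares",
	    (const nvlist_t * const *)spares, 1);

	nvlist_t **array;
	uint_t nspares;
	if (nvlist_lookup_nvlist_array(nvroot, "spares",
	    &array, &nspares) == 0) {
		for (uint_t i = 0; i < nspares; i++) {
			uint64_t guid;
			if (nvlist_lookup_uint64(array[i], "guid",
			    &guid) == 0)
				(void) printf("spare %u: guid %" PRIx64 "\n",
				    i, guid);
		}
	}

	fnvlist_free(nvroot);
	fnvlist_free(spare);
	return (0);
}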
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
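
The comment above describes a lazily built, mutex-protected cache: the first caller allocates the nvlist and does the expensive fill from disk, later callers only refresh it from in-core refcounts so a suspended pool cannot block them. Below is a generic pthread sketch of that lazy-init-under-a-lock pattern; the fill/refresh helpers and the four-slot cache are hypothetical, not ZFS APIs.

/*
 * Generic sketch of the lazy, mutex-protected cache pattern described
 * above: first call does the expensive fill, later calls only refresh
 * from cheap in-memory state.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static int *stats_cache;		/* NULL until first use */

static void
fill_from_disk(int *cache)		/* expensive, done once */
{
	for (int i = 0; i < 4; i++)
		cache[i] = i * 10;
}

static void
refresh_from_memory(int *cache)		/* cheap, done on later calls */
{
	for (int i = 0; i < 4; i++)
		cache[i]++;
}

static void
get_stats(int out[4])
{
	(void) pthread_mutex_lock(&stats_lock);
	if (stats_cache == NULL) {
		if ((stats_cache = calloc(4, sizeof (int))) == NULL)
			abort();
		fill_from_disk(stats_cache);
	} else {
		refresh_from_memory(stats_cache);
	}
	for (int i = 0; i < 4; i++)
		out[i] = stats_cache[i];
	(void) pthread_mutex_unlock(&stats_lock);
}

int
main(void)
{
	int out[4];

	get_stats(out);		/* first call: fills from "disk" */
	get_stats(out);		/* second call: refreshes in memory */
	(void) printf("stat[0] = %d\n", out[0]);
	return (0);
}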
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
fnvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa));
if (spa_suspended(spa)) {
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs));
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
newdevs[i] = fnvlist_dup(olddevs[i]);
for (i = 0; i < ndevs; i++)
newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
fnvlist_remove(sav->sav_config, config);
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)newdevs, ndevs + oldndevs);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
sav->sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)devs, ndevs);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
char *feat_name;
char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
* Instantiate the metaslab groups (this will dirty the vdevs);
* we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP));
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
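/* Bump the dRAID feature refcount once per dRAID vdev created above. */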
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
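/*
 * Start the sync and MMP threads, then wait for the creation txg to
 * make it to stable storage.
 */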
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
spa_import_os(spa);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
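/*
 * A rewind request in the load policy switches this import into
 * recovery mode.
 */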
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to caller (i.e. rewind info, missing devices, etc).
*/
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
fnvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES);
else
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
fnvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE);
else
spa->spa_l2cache.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
spa_import_os(spa);
return (0);
}
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp);
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
dsname);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error;
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
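/* A pool that was never fully initialized has nothing to sync or unload. */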
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
if (spa->spa_sync_on) {
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools from stealing the active spare
* from an exported pool. If the user insists, such a pool can
* still be force-exported.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
if (spa->spa_root_vdev != NULL) {
vdev_t *rvd = spa->spa_root_vdev;
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time. This has to be
* done before we set the spa_final_txg, otherwise
* spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
* spa_should_flush_logs_on_unload() should be called after
* spa_state has been set to the new_state.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
spa_export_os(spa);
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
*oldconfig = fnvlist_dup(spa->spa_config);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
}
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the draid feature flag
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
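/* An add request with no vdevs, spares, or cache devices is invalid. */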
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* The virtual dRAID spares must be added after the vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
* vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
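/*
 * Move the new top-level vdevs from their temporary parent onto the
 * root vdev and mark them dirty.
 */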
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Attach a device to a mirror. The arguments are the path to any device
* in the mirror, and the nvroot for the new device. If the path specifies
* a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
* If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
* should be performed instead of traditional healing reconstruction. From
* an administrator's perspective these are both resilver operations.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
if (dsl_scan_resilvering(spa_get_dsl(spa)))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH)) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* Spares can't replace logs
*/
if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
* vdev types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
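/* If the new device is a hot spare, mark it active and notify userspace. */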
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from the userland through the
* ioctl interface, spa_vdev_detach() can be potentially called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even in a case when we checkpoint the pool exactly
* as spa_vdev_resilver_done() calls this function everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check to see
* if the vdev_initialize_thread is NULL. We do this instead
* of using the vdev_initialize_state since there might be
* a previous initialization process which has completed but
* whose thread has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check to see if the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but whose thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
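/*
 * The split request must supply one device per top-level vdev, not
 * counting any trailing logs or holes.
 */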
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
nvl = fnvlist_alloc();
fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL));
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
newspa->spa_config_splitting = fnvlist_alloc();
fnvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If a detach was not performed above, replace waiters will not have
* been notified, in which case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
return (dsl_scan(spa->spa_dsl_pool, func));
}
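/*
 * Illustrative sketch, not part of this change: how the scanning entry
 * points above are typically driven.  POOL_SCRUB_PAUSE is assumed to be a
 * pool_scrub_cmd_t value; error handling is omitted.
 *
 *	spa_scan(spa, POOL_SCAN_SCRUB);			start a scrub
 *	spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE);	pause it
 *	spa_scan_stop(spa);				cancel it
 */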
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static __attribute__((noreturn)) void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
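/*
 * Illustrative sketch, not part of this change: async work is requested by
 * setting a task bit and only runs once a dispatch point spawns the worker
 * thread.
 *
 *	spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);   mark work pending
 *	...
 *	spa_async_dispatch(spa);    e.g. at the end of spa_sync(); spawns
 *	                            spa_async_thread() unless suspended
 */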
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG and therefore running this function
* would be considered a no-op as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
nvroot = fnvlist_alloc();
if (sav->sav_count == 0) {
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)NULL, 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)list, sav->sav_count);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
char *strval, *fname;
zpool_prop_t prop;
const char *propname;
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
case ZPOOL_PROP_INVAL:
/*
* We checked this earlier in spa_prop_validate().
*/
ASSERT(zpool_prop_feature(nvpair_name(elem)));
fname = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", nvpair_name(elem));
break;
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", nvpair_name(elem),
(longlong_t)intval);
} else {
ASSERT(0); /* not allowed */
}
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
default:
break;
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the ability to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
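/*
 * Worked example for the queue-depth computation above, assuming the
 * common defaults zfs_vdev_async_write_max_active = 10 and
 * zfs_vdev_queue_depth_pct = 1000 (both are tunables, so the actual
 * values may differ):
 *
 *	max_queue_depth = 10 * 1000 / 100 = 100 queued allocations per
 *	metaslab group, with each allocator starting at
 *	zfs_vdev_def_queue_depth slots.
 */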
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We can not defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while (list_head(&spa->spa_state_dirty_list) != NULL) {
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
static boolean_t
spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
{
(void) spa;
int i;
uint64_t vdev_guid;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&vdev_guid) == 0 && vdev_guid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
boolean_t
spa_has_l2cache(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
}
/*
* Check if a pool has an active shared spare device.
* Note: the reference count of an active spare is 2, as a spare and as a
* replacing vdev.
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being in-
* progress to not-in-progress so that the thread can wake up and determine
* whether it is finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
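/*
 * Condensed sketch of the protocol above (illustrative only;
 * activity_in_progress() and update_activity_state() are placeholders):
 *
 *	waiting thread
 *		mutex_enter(&spa->spa_activities_lock);
 *		while (activity_in_progress(spa))	may briefly drop this
 *							lock to take the
 *							activity-specific lock
 *			cv_wait(&spa->spa_activities_cv,
 *			    &spa->spa_activities_lock);
 *		mutex_exit(&spa->spa_activities_lock);
 *
 *	completing thread
 *		update_activity_state(spa);		under its own lock
 *		spa_notify_waiters(spa);		cv_broadcast under
 *							spa_activities_lock
 */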
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
break;
zfs_fallthrough;
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually, we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete.
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
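/*
 * Illustrative usage sketch (hypothetical caller, not part of this change;
 * "tank" is a placeholder pool name):
 *
 *	boolean_t waited;
 *	int err = spa_wait("tank", ZPOOL_WAIT_SCRUB, &waited);
 *	if (err == 0 && waited)
 *		the scrub that was in progress has finished, or the wait
 *		was canceled because the pool is being exported
 */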
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#else
(void) spa, (void) vd, (void) hist_nvl, (void) name;
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#else
(void) ev;
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
"Number of threads per IO worker taskqueue");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
ZMOD_RW, "Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
ZMOD_RW, "Set the livelist condense synctask to pause");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_boot.c b/sys/contrib/openzfs/module/zfs/spa_boot.c
deleted file mode 100644
index fddb5c3c9683..000000000000
--- a/sys/contrib/openzfs/module/zfs/spa_boot.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or https://opensource.org/licenses/CDDL-1.0.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifdef _KERNEL
-
-#include <sys/zio.h>
-#include <sys/spa_boot.h>
-#include <sys/sunddi.h>
-
-char *
-spa_get_bootprop(char *propname)
-{
- char *value;
-
- if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
- DDI_PROP_DONTPASS, propname, &value) != DDI_SUCCESS)
- return (NULL);
- return (value);
-}
-
-void
-spa_free_bootprop(char *value)
-{
- ddi_prop_free(value);
-}
-
-#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
index b5b1dfa8a083..a837b1ce97ec 100644
--- a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
+++ b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
@@ -1,637 +1,637 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017 by Delphix. All rights reserved.
*/
/*
* Storage Pool Checkpoint
*
* A storage pool checkpoint can be thought of as a pool-wide snapshot or
* a stable version of extreme rewind that guarantees no blocks from the
* checkpointed state will have been overwritten. It remembers the entire
* state of the storage pool (e.g. snapshots, dataset names, etc.) from the
* point that it was taken and the user can rewind back to that point even if
* they applied destructive operations on their datasets or even enabled new
* zpool on-disk features. If a pool has a checkpoint that is no longer
* needed, the user can discard it.
*
* == On disk data structures used ==
*
* - The pool has a new feature flag and a new entry in the MOS. The feature
* flag is set to active when we create the checkpoint and remains active
* until the checkpoint is fully discarded. The entry in the MOS config
* (DMU_POOL_ZPOOL_CHECKPOINT) is populated with the uberblock that
* references the state of the pool when we take the checkpoint. The entry
* remains populated until we start discarding the checkpoint or we rewind
* back to it.
*
* - Each vdev contains a vdev-wide space map while the pool has a checkpoint,
* which persists until the checkpoint is fully discarded. The space map
* contains entries that have been freed in the current state of the pool
* but we want to keep around in case we decide to rewind to the checkpoint.
* [see vdev_checkpoint_sm]
*
* - Each metaslab's ms_sm space map behaves the same as without the
* checkpoint, with the only exception being the scenario when we free
* blocks that belong to the checkpoint. In this case, these blocks remain
* ALLOCATED in the metaslab's space map and they are added as FREE in the
* vdev's checkpoint space map.
*
* - Each uberblock has a field (ub_checkpoint_txg) which holds the txg that
* the uberblock was checkpointed. For normal uberblocks this field is 0.
*
* == Overview of operations ==
*
* - To create a checkpoint, we first wait for the current TXG to be synced,
* so we can use the most recently synced uberblock (spa_ubsync) as the
* checkpointed uberblock. Then we use an early synctask to place that
* uberblock in MOS config, increment the feature flag for the checkpoint
* (marking it active), and set spa_checkpoint_txg (see its use below)
* to the TXG of the checkpointed uberblock. We use an early synctask for
* the aforementioned operations to ensure that no blocks were dirtied
* between the current TXG and the TXG of the checkpointed uberblock
* (e.g the previous txg).
*
* - When a checkpoint exists, we need to ensure that the blocks that
* belong to the checkpoint are freed but never reused. This means that
* these blocks should never end up in the ms_allocatable or the ms_freeing
* trees of a metaslab. Therefore, whenever there is a checkpoint the new
* ms_checkpointing tree is used in addition to the aforementioned ones.
*
* Whenever a block is freed and we find out that it is referenced by the
* checkpoint (we find out by comparing its birth to spa_checkpoint_txg),
* we place it in the ms_checkpointing tree instead of the ms_freeing tree.
* This way, we divide the blocks that are being freed into checkpointed
* and not-checkpointed blocks.
*
* In order to persist these frees, we write the extents from the
* ms_freeing tree to the ms_sm as usual, and the extents from the
* ms_checkpointing tree to the vdev_checkpoint_sm. This way, these
* checkpointed extents will remain allocated in the metaslab's ms_sm space
* map, and therefore won't be reused [see metaslab_sync()]. In addition,
* when we discard the checkpoint, we can find the entries that have
* actually been freed in vdev_checkpoint_sm.
* [see spa_checkpoint_discard_thread_sync()]
*
* - To discard the checkpoint we use an early synctask to delete the
* checkpointed uberblock from the MOS config, set spa_checkpoint_txg to 0,
* and wakeup the discarding zthr thread (an open-context async thread).
* We use an early synctask to ensure that the operation happens before any
* new data end up in the checkpoint's data structures.
*
* Once the synctask is done and the discarding zthr is awake, we discard
* the checkpointed data over multiple TXGs by having the zthr prefetch
* entries from vdev_checkpoint_sm and then start a synctask that places
* them as free blocks into their respective ms_allocatable and ms_sm
* structures.
* [see spa_checkpoint_discard_thread()]
*
* When there are no entries left in the vdev_checkpoint_sm of all
* top-level vdevs, a final synctask runs that decrements the feature flag.
*
* - To rewind to the checkpoint, we first use the current uberblock and
* open the MOS so we can access the checkpointed uberblock from the MOS
* config. After we retrieve the checkpointed uberblock, we use it as the
* current uberblock for the pool by writing it to disk with an updated
* TXG, opening its version of the MOS, and moving on as usual from there.
* [see spa_ld_checkpoint_rewind()]
*
* An important note on rewinding to the checkpoint has to do with how we
* handle ZIL blocks. In the scenario of a rewind, we clear out any ZIL
* blocks that have not been claimed by the time we took the checkpoint
* as they should no longer be valid.
* [see comment in zil_claim()]
*
* == Miscellaneous information ==
*
* - In the hypothetical event that we take a checkpoint, remove a vdev,
* and attempt to rewind, the rewind would fail as the checkpointed
* uberblock would reference data in the removed device. For this reason
* and others of similar nature, we disallow the following operations that
* can change the config:
* vdev removal and attach/detach, mirror splitting, and pool reguid.
*
* - As most of the checkpoint logic is implemented in the SPA and doesn't
* distinguish datasets when it comes to space accounting, having a
* checkpoint can potentially break the boundaries set by dataset
* reservations.
*/
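As a rough, standalone illustration of the membership test described above (the example_* name is hypothetical and not an OpenZFS symbol, and this is a simplification of the full in-kernel check): a freed block belongs to the checkpoint when its birth TXG is at or before the checkpointed uberblock's TXG, so its free is routed to ms_checkpointing rather than ms_freeing.

#include <stdint.h>

/*
 * Hypothetical stand-in for the routing decision made when a block is
 * freed while a checkpoint exists: blocks born at or before the
 * checkpointed TXG are still referenced by the checkpoint, so their
 * frees go to ms_checkpointing (and from there to the vdev's
 * checkpoint space map) instead of ms_freeing.
 */
int
example_block_is_checkpointed(uint64_t blk_birth_txg, uint64_t checkpoint_txg)
{
        return (checkpoint_txg != 0 && blk_birth_txg <= checkpoint_txg);
}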
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_checkpoint.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
/*
* The following parameter limits the amount of memory to be used for the
* prefetching of the checkpoint space map done on each vdev while
* discarding the checkpoint.
*
* The reason it exists is because top-level vdevs with long checkpoint
* space maps can potentially take up a lot of memory depending on the
* amount of checkpointed data that has been freed within them while
* the pool had a checkpoint.
*/
static unsigned long zfs_spa_discard_memory_limit = 16 * 1024 * 1024;
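A minimal sketch of how the memory limit above bounds the discard work, assuming a simplified standalone form (example_discard_in_windows is hypothetical, not an OpenZFS function): the checkpoint space map is consumed from its tail in windows no larger than the limit, one prefetch-plus-synctask round per window, mirroring spa_checkpoint_discard_thread() below.

#include <stdint.h>

/*
 * Standalone sketch: destroy a space map of sm_len bytes from its
 * tail in windows of at most `limit` bytes, one round per window.
 */
void
example_discard_in_windows(uint64_t sm_len, uint64_t limit)
{
        while (sm_len > 0) {
                uint64_t size = (sm_len < limit) ? sm_len : limit;
                uint64_t offset = sm_len - size;        /* tail-most window */

                /* prefetch [offset, offset + size), then destroy it */
                sm_len = offset;
        }
}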
int
spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
memset(pcs, 0, sizeof (pool_checkpoint_stat_t));
int error = zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT)
pcs->pcs_state = CS_CHECKPOINT_DISCARDING;
else
pcs->pcs_state = CS_CHECKPOINT_EXISTS;
pcs->pcs_space = spa->spa_checkpoint_info.sci_dspace;
pcs->pcs_start_time = spa->spa_checkpoint_info.sci_timestamp;
return (0);
}
static void
spa_checkpoint_discard_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = arg;
spa->spa_checkpoint_info.sci_timestamp = 0;
spa_feature_decr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_notify_waiters(spa);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"finished discarding checkpointed state from the pool");
}
typedef struct spa_checkpoint_discard_sync_callback_arg {
vdev_t *sdc_vd;
uint64_t sdc_txg;
uint64_t sdc_entry_limit;
} spa_checkpoint_discard_sync_callback_arg_t;
static int
spa_checkpoint_discard_sync_callback(space_map_entry_t *sme, void *arg)
{
spa_checkpoint_discard_sync_callback_arg_t *sdc = arg;
vdev_t *vd = sdc->sdc_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
if (sdc->sdc_entry_limit == 0)
return (SET_ERROR(EINTR));
/*
* Since the space map is not condensed, we know that
* none of its entries is crossing the boundaries of
* its respective metaslab.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_type, ==, SM_FREE);
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* At this point we should not be processing any
* other frees concurrently, so the lock is technically
* unnecessary. We use the lock anyway though to
* potentially save ourselves from future headaches.
*/
mutex_enter(&ms->ms_lock);
if (range_tree_is_empty(ms->ms_freeing))
vdev_dirty(vd, VDD_METASLAB, ms, sdc->sdc_txg);
range_tree_add(ms->ms_freeing, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
ASSERT3U(vd->vdev_spa->spa_checkpoint_info.sci_dspace, >=,
sme->sme_run);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, >=, sme->sme_run);
vd->vdev_spa->spa_checkpoint_info.sci_dspace -= sme->sme_run;
vd->vdev_stat.vs_checkpoint_space -= sme->sme_run;
sdc->sdc_entry_limit--;
return (0);
}
#ifdef ZFS_DEBUG
static void
spa_checkpoint_accounting_verify(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t ckpoint_sm_space_sum = 0;
uint64_t vs_ckpoint_space_sum = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (vd->vdev_checkpoint_sm != NULL) {
ckpoint_sm_space_sum +=
-space_map_allocated(vd->vdev_checkpoint_sm);
vs_ckpoint_space_sum +=
vd->vdev_stat.vs_checkpoint_space;
ASSERT3U(ckpoint_sm_space_sum, ==,
vs_ckpoint_space_sum);
} else {
ASSERT0(vd->vdev_stat.vs_checkpoint_space);
}
}
ASSERT3U(spa->spa_checkpoint_info.sci_dspace, ==, ckpoint_sm_space_sum);
}
#endif
static void
spa_checkpoint_discard_thread_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd = arg;
int error;
/*
* The space map callback is applied only to non-debug entries.
* Because the number of debug entries is less than or equal to the
* number of non-debug entries, we want to ensure that we only
* read what we prefetched from open-context.
*
* Thus, we set the maximum entries that the space map callback
* will be applied to be half the entries that could fit in the
* imposed memory limit.
*
* Note that since this is a conservative estimate we also
* assume the worst case scenario in our computation where each
* entry is two words.
*/
uint64_t max_entry_limit =
(zfs_spa_discard_memory_limit / (2 * sizeof (uint64_t))) >> 1;
/*
* Iterate from the end of the space map towards the beginning,
* placing its entries on ms_freeing and removing them from the
* space map. The iteration stops if one of the following
* conditions is true:
*
* 1] We reached the beginning of the space map. At this point
* the space map should be completely empty and
* space_map_incremental_destroy should have returned 0.
* The next step would be to free and close the space map
* and remove its entry from its vdev's top zap. This allows
* spa_checkpoint_discard_thread() to move on to the next vdev.
*
* 2] We reached the memory limit (amount of memory used to hold
* space map entries in memory) and space_map_incremental_destroy
* returned EINTR. This means that there are entries remaining
* in the space map that will be cleared in a future invocation
* of this function by spa_checkpoint_discard_thread().
*/
spa_checkpoint_discard_sync_callback_arg_t sdc;
sdc.sdc_vd = vd;
sdc.sdc_txg = tx->tx_txg;
sdc.sdc_entry_limit = max_entry_limit;
uint64_t words_before =
space_map_length(vd->vdev_checkpoint_sm) / sizeof (uint64_t);
error = space_map_incremental_destroy(vd->vdev_checkpoint_sm,
spa_checkpoint_discard_sync_callback, &sdc, tx);
uint64_t words_after =
space_map_length(vd->vdev_checkpoint_sm) / sizeof (uint64_t);
#ifdef ZFS_DEBUG
spa_checkpoint_accounting_verify(vd->vdev_spa);
#endif
zfs_dbgmsg("discarding checkpoint: txg %llu, vdev id %lld, "
"deleted %llu words - %llu words are left",
(u_longlong_t)tx->tx_txg, (longlong_t)vd->vdev_id,
(u_longlong_t)(words_before - words_after),
(u_longlong_t)words_after);
if (error != EINTR) {
if (error != 0) {
zfs_panic_recover("zfs: error %lld was returned "
"while incrementally destroying the checkpoint "
- "space map of vdev %u\n",
+ "space map of vdev %llu\n",
(longlong_t)error, vd->vdev_id);
}
ASSERT0(words_after);
ASSERT0(space_map_allocated(vd->vdev_checkpoint_sm));
ASSERT0(space_map_length(vd->vdev_checkpoint_sm));
space_map_free(vd->vdev_checkpoint_sm, tx);
space_map_close(vd->vdev_checkpoint_sm);
vd->vdev_checkpoint_sm = NULL;
VERIFY0(zap_remove(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, tx));
}
}
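A worked example of the max_entry_limit computation in spa_checkpoint_discard_thread_sync() above, assuming the default 16M zfs_spa_discard_memory_limit and worst-case two-word entries (a standalone sketch, not OpenZFS code):

#include <stdint.h>
#include <stdio.h>

/*
 * With a 16 MiB limit and 16-byte (two-word) entries, 1048576 entries
 * fit in the limit; half of that, 524288, is handed to the callback so
 * that debug entries never push us past what was prefetched in open
 * context.
 */
int
main(void)
{
        uint64_t limit = 16ULL * 1024 * 1024;                   /* 16 MiB */
        uint64_t entries = limit / (2 * sizeof (uint64_t));     /* 1048576 */
        uint64_t max_entry_limit = entries >> 1;                /* 524288 */

        printf("%llu callback entries per synctask\n",
            (unsigned long long)max_entry_limit);
        return (0);
}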
static boolean_t
spa_checkpoint_discard_is_done(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(!spa_has_checkpoint(spa));
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT));
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (rvd->vdev_child[c]->vdev_checkpoint_sm != NULL)
return (B_FALSE);
ASSERT0(rvd->vdev_child[c]->vdev_stat.vs_checkpoint_space);
}
return (B_TRUE);
}
boolean_t
spa_checkpoint_discard_thread_check(void *arg, zthr_t *zthr)
{
(void) zthr;
spa_t *spa = arg;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (B_FALSE);
if (spa_has_checkpoint(spa))
return (B_FALSE);
return (B_TRUE);
}
void
spa_checkpoint_discard_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
while (vd->vdev_checkpoint_sm != NULL) {
space_map_t *checkpoint_sm = vd->vdev_checkpoint_sm;
int numbufs;
dmu_buf_t **dbp;
if (zthr_iscancelled(zthr))
return;
ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
uint64_t size = MIN(space_map_length(checkpoint_sm),
zfs_spa_discard_memory_limit);
uint64_t offset =
space_map_length(checkpoint_sm) - size;
/*
* Ensure that the part of the space map that will
* be destroyed by the synctask is prefetched in
* memory before the synctask runs.
*/
int error = dmu_buf_hold_array_by_bonus(
checkpoint_sm->sm_dbuf, offset, size,
B_TRUE, FTAG, &numbufs, &dbp);
if (error != 0) {
zfs_panic_recover("zfs: error %d was returned "
"while prefetching checkpoint space map "
"entries of vdev %llu\n",
error, vd->vdev_id);
}
VERIFY0(dsl_sync_task(spa->spa_name, NULL,
spa_checkpoint_discard_thread_sync, vd,
0, ZFS_SPACE_CHECK_NONE));
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
}
VERIFY(spa_checkpoint_discard_is_done(spa));
VERIFY0(spa->spa_checkpoint_info.sci_dspace);
VERIFY0(dsl_sync_task(spa->spa_name, NULL,
spa_checkpoint_discard_complete_sync, spa,
0, ZFS_SPACE_CHECK_NONE));
}
static int
spa_checkpoint_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ENOTSUP));
if (!spa_top_vdevs_spacemap_addressable(spa))
return (SET_ERROR(ZFS_ERR_VDEV_TOO_BIG));
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(ZFS_ERR_DEVRM_IN_PROGRESS));
if (spa->spa_checkpoint_txg != 0)
return (SET_ERROR(ZFS_ERR_CHECKPOINT_EXISTS));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_DISCARDING_CHECKPOINT));
return (0);
}
static void
spa_checkpoint_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
uberblock_t checkpoint = spa->spa_ubsync;
/*
* At this point, there should not be a checkpoint in the MOS.
*/
ASSERT3U(zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT), ==, ENOENT);
ASSERT0(spa->spa_checkpoint_info.sci_timestamp);
ASSERT0(spa->spa_checkpoint_info.sci_dspace);
/*
* Since the checkpointed uberblock is the one that just got synced
* (we use spa_ubsync), its txg must be equal to the txg number of
* the txg we are syncing, minus 1.
*/
ASSERT3U(checkpoint.ub_txg, ==, spa->spa_syncing_txg - 1);
/*
* Once the checkpoint is in place, we need to ensure that none of
* its blocks will be marked for reuse after they have been freed.
* When there is a checkpoint and a block is freed, we compare its
* birth txg to the txg of the checkpointed uberblock to see if the
* block is part of the checkpoint or not. Therefore, we have to set
* spa_checkpoint_txg before any frees happen in this txg (which is
* why this is done as an early_synctask as explained in the comment
* in spa_checkpoint()).
*/
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
checkpoint.ub_checkpoint_txg = checkpoint.ub_txg;
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT,
sizeof (uint64_t), sizeof (uberblock_t) / sizeof (uint64_t),
&checkpoint, tx));
/*
* Increment the feature refcount and thus activate the feature.
* Note that the feature will be deactivated when we've
* completely discarded all checkpointed state (both vdev
* space maps and uberblock).
*/
spa_feature_incr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_history_log_internal(spa, "spa checkpoint", tx,
"checkpointed uberblock txg=%llu", (u_longlong_t)checkpoint.ub_txg);
}
/*
* Create a checkpoint for the pool.
*/
int
spa_checkpoint(const char *pool)
{
int error;
spa_t *spa;
error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
mutex_enter(&spa->spa_vdev_top_lock);
/*
* Wait for current syncing txg to finish so the latest synced
* uberblock (spa_ubsync) has all the changes that we expect
* to see if we were to revert later to the checkpoint. In other
* words we want the checkpointed uberblock to include/reference
* all the changes that were pending at the time that we issued
* the checkpoint command.
*/
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* As the checkpointed uberblock references blocks from the previous
* txg (spa_ubsync) we want to ensure that we are not freeing any of
* these blocks in the same txg that the following synctask will
* run. Thus, we run it as an early synctask, so the dirty changes
* that are synced to disk afterwards during zios and other synctasks
* do not reuse checkpointed blocks.
*/
error = dsl_early_sync_task(pool, spa_checkpoint_check,
spa_checkpoint_sync, NULL, 0, ZFS_SPACE_CHECK_NORMAL);
mutex_exit(&spa->spa_vdev_top_lock);
spa_close(spa, FTAG);
return (error);
}
static int
spa_checkpoint_discard_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
if (spa->spa_checkpoint_txg == 0)
return (SET_ERROR(ZFS_ERR_DISCARDING_CHECKPOINT));
VERIFY0(zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT));
return (0);
}
static void
spa_checkpoint_discard_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
VERIFY0(zap_remove(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, tx));
spa->spa_checkpoint_txg = 0;
zthr_wakeup(spa->spa_checkpoint_discard_zthr);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"started discarding checkpointed state from the pool");
}
/*
* Discard the checkpoint from a pool.
*/
int
spa_checkpoint_discard(const char *pool)
{
/*
* Similarly to spa_checkpoint(), we want our synctask to run
* before any pending dirty data are written to disk so they
* won't end up in the checkpoint's data structures (e.g.
* ms_checkpointing and vdev_checkpoint_sm) and re-create any
* space maps that the discarding open-context thread has
* deleted.
* [see spa_discard_checkpoint_sync and spa_discard_checkpoint_thread]
*/
return (dsl_early_sync_task(pool, spa_checkpoint_discard_check,
spa_checkpoint_discard_sync, NULL, 0,
ZFS_SPACE_CHECK_DISCARD_CHECKPOINT));
}
EXPORT_SYMBOL(spa_checkpoint_get_stats);
EXPORT_SYMBOL(spa_checkpoint_discard_thread);
EXPORT_SYMBOL(spa_checkpoint_discard_thread_check);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, zfs_spa_, discard_memory_limit, ULONG, ZMOD_RW,
"Limit for memory used in prefetching the checkpoint space map done "
"on each vdev while discarding the checkpoint");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c b/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
index 19e334916bd0..4ecce8214f6a 100644
--- a/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
+++ b/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
@@ -1,1399 +1,1399 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018, 2019 by Delphix. All rights reserved.
*/
#include <sys/dmu_objset.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>
/*
* Log Space Maps
*
* Log space maps are an optimization in ZFS metadata allocations for pools
* whose workloads are primarily random-writes. Random-write workloads are also
* typically random-free, meaning that they are freeing from locations scattered
* throughout the pool. This means that each TXG we will have to append some
* FREE records to almost every metaslab. With log space maps, we hold their
* changes in memory and log them altogether in one pool-wide space map on-disk
* for persistence. As more blocks are accumulated in the log space maps and
* more unflushed changes are accounted in memory, we flush a selected group
* of metaslabs every TXG to relieve memory pressure and potential overheads
* when loading the pool. Flushing a metaslab to disk relieves memory as we
* flush any unflushed changes from memory to disk (i.e. the metaslab's space
* map) and saves import time by making old log space maps obsolete and
* eventually destroying them. [A log space map is said to be obsolete when all
* its entries have made it to their corresponding metaslab space maps].
*
* == On disk data structures used ==
*
* - The pool has a new feature flag and a new entry in the MOS. The feature
* is activated when we create the first log space map and remains active
* for the lifetime of the pool. The new entry in the MOS Directory [refer
* to DMU_POOL_LOG_SPACEMAP_ZAP] is populated with a ZAP whose key-value
* pairs are of the form <key: txg, value: log space map object for that txg>.
* This entry is our on-disk reference of the log space maps that exist in
* the pool for each TXG and it is used during import to load all the
* metaslab unflushed changes in memory. To see how this structure is first
* created and later populated refer to spa_generate_syncing_log_sm(). To see
* how it is used during import time refer to spa_ld_log_sm_metadata().
*
* - Each vdev has a new entry in its vdev_top_zap (see field
* VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS) which holds the msp_unflushed_txg of
* each metaslab in this vdev. This field is the on-disk counterpart of the
* in-memory field ms_unflushed_txg which tells us from which TXG and onwards
* the metaslab hasn't had its changes flushed. During import, we use this
* to ignore any entries in the space map log that are for this metaslab but
* from a TXG before msp_unflushed_txg. At that point, we also populate its
* in-memory counterpart and from there both fields are updated every time
* we flush that metaslab.
*
* - A space map is created every TXG and, during that TXG, it is used to log
* all incoming changes (the log space map). When created, the log space map
* is referenced in memory by spa_syncing_log_sm and its object ID is inserted
* to the space map ZAP mentioned above. The log space map is closed at the
* end of the TXG and will be destroyed when it becomes fully obsolete. We
* know when a log space map has become obsolete by looking at the oldest
* (and smallest) ms_unflushed_txg in the pool. If the value of that is bigger
* than the log space map's TXG, then it means that there is no metaslab that
* doesn't have the changes from that log and we can therefore destroy it.
* [see spa_cleanup_old_sm_logs()].
*
* == Important in-memory structures ==
*
* - The per-spa field spa_metaslabs_by_flushed sorts all the metaslabs in
* the pool by their ms_unflushed_txg field. It is primarily used for three
* reasons. First of all, it is used during flushing where we try to flush
* metaslabs in-order from the oldest-flushed to the most recently flushed
* every TXG. Secondly, it helps us to lookup the ms_unflushed_txg of the
* oldest flushed metaslab to distinguish which log space maps have become
* obsolete and which ones are still relevant. Finally it tells us which
* metaslabs have unflushed changes in a pool where this feature was just
* enabled, as we don't immediately add all of the pool's metaslabs but we
* add them over time as they go through metaslab_sync(). The reason that
* we do that is to ease these pools into the behavior of the flushing
* algorithm (described later on).
*
* - The per-spa field spa_sm_logs_by_txg can be thought as the in-memory
* counterpart of the space map ZAP mentioned above. It's an AVL tree whose
* nodes represent the log space maps in the pool. This in-memory
* representation of log space maps in the pool sorts the log space maps by
* the TXG that they were created (which is also the TXG of their unflushed
* changes). It also contains the following extra information for each
* space map:
* [1] The number of metaslabs that were last flushed on that TXG. This is
* important because if that counter is zero and this is the oldest
* log then it means that it is also obsolete.
* [2] The number of blocks of that space map. This field is used by the
* block heuristic of our flushing algorithm (described later on).
* It represents how many blocks of metadata changes ZFS had to write
* to disk for that TXG.
*
* - The per-spa field spa_log_summary is a list of entries that summarizes
* the metaslab and block counts of all the nodes of the spa_sm_logs_by_txg
* AVL tree mentioned above. The reason this exists is that our flushing
* algorithm (described later) tries to estimate how many metaslabs to flush
* in each TXG by iterating over all the log space maps and looking at their
* block counts. Summarizing that information means that we don't have to
* iterate through each space map, minimizing the runtime overhead of the
* flushing algorithm which would be induced in syncing context. In terms of
* implementation the log summary is used as a queue:
* * we modify or pop entries from its head when we flush metaslabs
* * we modify or append entries to its tail when we sync changes.
*
* - Each metaslab has two new range trees that hold its unflushed changes,
* ms_unflushed_allocs and ms_unflushed_frees. These are always disjoint.
*
* == Flushing algorithm ==
*
* The decision of how many metaslabs to flush on a given TXG is guided by
* two heuristics:
*
* [1] The memory heuristic -
* We keep track of the memory used by the unflushed trees from all the
* metaslabs [see sus_memused of spa_unflushed_stats] and we ensure that it
* stays below a certain threshold which is determined by an arbitrary hard
* limit and an arbitrary percentage of the system's memory [see
* spa_log_exceeds_memlimit()]. When we see that the memory usage of the
* unflushed changes are passing that threshold, we flush metaslabs, which
* empties their unflushed range trees, reducing the memory used.
*
* [2] The block heuristic -
* We try to keep the total number of blocks in the log space maps in check
* so the log doesn't grow indefinitely and we don't induce a lot of overhead
* when loading the pool. At the same time we don't want to flush a lot of
* metaslabs too often as this would defeat the purpose of the log space map.
* As a result we set a limit on the number of blocks that we think is
* acceptable for the log space maps to have and try not to cross it.
* [see sus_blocklimit from spa_unflushed_stats].
*
* In order to stay below the block limit every TXG we have to estimate how
* many metaslabs we need to flush based on the current rate of incoming blocks
* and our history of log space map blocks. The main idea here is to answer
* the question of how many metaslabs we need to flush in order to get rid of
* at least X log space map blocks. We can answer this question
* by iterating backwards from the oldest log space map to the newest one
* and looking at their metaslab and block counts. At this point the log summary
* mentioned above comes in handy as it reduces the amount of things that we have
* to iterate (even though it may reduce the preciseness of our estimates due
* to its aggregation of data). So with that in mind, we project the incoming
* rate of the current TXG into the future and attempt to approximate how many
* metaslabs we would need to flush from now on in order to avoid exceeding our
* block limit at different points in the future (granted that we would keep
* flushing the same number of metaslabs for every TXG). Then we take the
* maximum number from all these estimates to be on the safe side. For the
* exact implementation details of the algorithm refer to
* spa_estimate_metaslabs_to_flush.
*/
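A compressed, hypothetical restatement of the two flushing heuristics described above (example_should_keep_flushing is not an OpenZFS function); the real loop in spa_flush_metaslabs() below keeps going only while at least one heuristic still asks for work:

#include <stdint.h>

/*
 * Keep flushing metaslabs while either the memory used by unflushed
 * range trees exceeds its threshold (memory heuristic) or the block
 * heuristic still wants more metaslabs flushed this TXG.
 */
int
example_should_keep_flushing(uint64_t memused, uint64_t memlimit,
    uint64_t want_to_flush)
{
        if (memused > memlimit)
                return (1);             /* memory heuristic */
        return (want_to_flush > 0);     /* block heuristic */
}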
/*
* This is used as the block size for the space maps used for the
* log space map feature. These space maps benefit from a bigger
* block size as we expect to be writing a lot of data to them at
* once.
*/
static const unsigned long zfs_log_sm_blksz = 1ULL << 17;
/*
* Percentage of the overall system's memory that ZFS allows to be
* used for unflushed changes (e.g. the sum of size of all the nodes
* in the unflushed trees).
*
* Note that this value is calculated over 1000000 for finer granularity
* (thus the _ppm suffix; reads as "parts per million"). As an example,
* the default of 1000 allows 0.1% of memory to be used.
*/
static unsigned long zfs_unflushed_max_mem_ppm = 1000;
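A worked example of the ppm scaling described above, assuming a hypothetical 64 GiB system (standalone sketch, not OpenZFS code): the default of 1000 ppm allows roughly 68 MB of unflushed-change memory, which is below the hard limit defined just below and is therefore the effective threshold on such a system.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t physmem_bytes = 64ULL * 1024 * 1024 * 1024;   /* 64 GiB */
        uint64_t ppm = 1000;                    /* default, i.e. 0.1% */
        uint64_t allowed = physmem_bytes * ppm / 1000000;

        printf("%llu bytes allowed (~%.1f MB)\n",
            (unsigned long long)allowed, (double)allowed / 1e6);
        return (0);
}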
/*
* Specific hard-limit in memory that ZFS allows to be used for
* unflushed changes.
*/
static unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30;
/*
* The following tunable determines the number of blocks that can be used for
* the log space maps. It is expressed as a percentage of the total number of
* metaslabs in the pool (i.e. the default of 400 means that the number of log
* blocks is capped at 4 times the number of metaslabs).
*
* This value exists to tune our flushing algorithm, with higher values
* flushing metaslabs less often (doing fewer I/Os) per TXG versus lower values
* flushing metaslabs more aggressively with the upside of saving overheads
* when loading the pool. Another factor in this tradeoff is that flushing
* less often can potentially lead to better utilization of the metaslab space
* map's block size as we accumulate more changes per flush.
*
* Given that this tunable indirectly controls the flush rate (metaslabs
* flushed per txg), it makes sense to express it as a percentage of the
* number of metaslabs in the pool.
*
* As a rule of thumb we default this tunable to 400% based on the following:
*
* 1] Assuming a constant flush rate and a constant incoming rate of log blocks
* it is reasonable to expect that the amount of obsolete entries changes
* linearly from txg to txg (e.g. the oldest log should have the most
* obsolete entries, and the most recent one the least). With this we could
* say that, at any given time, about half of the entries in the whole space
* map log are obsolete. Thus for every two entries for a metaslab in the
* log space map, only one of them is valid and actually makes it to the
* metaslab's space map.
* [factor of 2]
* 2] Each entry in the log space map is guaranteed to be two words while
* entries in metaslab space maps are generally single-word.
* [an extra factor of 2 - 400% overall]
* 3] Even if [1] and [2] are slightly less than 2 each, we haven't taken into
* account any consolidation of segments from the log space map to the
* unflushed range trees nor their history (e.g. a segment being allocated,
* then freed, then allocated again means 3 log space map entries but 0
* metaslab space map entries). Depending on the workload, we've seen ~1.8
* non-obsolete log space map entries per metaslab entry, for a total of
* ~600%. Since most of these estimates are workload dependent, though, we
* default to 400% to be conservative.
*
* Thus we could say that even in the worst
* case of [1] and [2], the factor should end up being 4.
*
* That said, regardless of the number of metaslabs in the pool we need to
* provide upper and lower bounds for the log block limit.
* [see zfs_unflushed_log_block_{min,max}]
*/
static unsigned long zfs_unflushed_log_block_pct = 400;
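A standalone sketch of the block-limit arithmetic described above (example_log_block_limit is hypothetical, not OpenZFS code): the raw limit is the given percentage of the metaslab count, clamped to the zfs_unflushed_log_block_{min,max} bounds defined below. For example, 200 metaslabs at the default 400% gives 800 blocks, which the 1000-block minimum then raises to 1000.

#include <stdint.h>

uint64_t
example_log_block_limit(uint64_t metaslab_count, uint64_t pct,
    uint64_t lo, uint64_t hi)
{
        uint64_t limit = metaslab_count * pct / 100;    /* e.g. 200 * 4 = 800 */

        if (limit < lo)
                limit = lo;     /* raised to zfs_unflushed_log_block_min */
        if (limit > hi)
                limit = hi;     /* capped at zfs_unflushed_log_block_max */
        return (limit);
}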
/*
* If the number of metaslabs is small and our incoming rate is high, we could
* get into a situation where we are flushing all our metaslabs every TXG. Thus
* we always allow at least this many log blocks.
*/
static unsigned long zfs_unflushed_log_block_min = 1000;
/*
* If the log becomes too big, the import time of the pool can take a hit in
* terms of performance. Thus we have a hard limit in the size of the log in
* terms of blocks.
*/
static unsigned long zfs_unflushed_log_block_max = (1ULL << 17);
/*
* Also we have a hard limit in the size of the log in terms of dirty TXGs.
*/
static unsigned long zfs_unflushed_log_txg_max = 1000;
/*
* Max # of rows allowed for the log_summary. The tradeoff here is accuracy and
* stability of the flushing algorithm (longer summary) vs its runtime overhead
* (smaller summary is faster to traverse).
*/
static unsigned long zfs_max_logsm_summary_length = 10;
/*
* Tunable that sets the lower bound on the metaslabs to flush every TXG.
*
* Setting this to 0 has no effect since if the pool is idle we won't even be
* creating log space maps and therefore we won't be flushing. On the other
* hand if the pool has any incoming workload our block heuristic will start
* flushing metaslabs anyway.
*
* The point of this tunable is to be used in extreme cases where we really
* want to flush more metaslabs than our adaptable heuristic plans to flush.
*/
static unsigned long zfs_min_metaslabs_to_flush = 1;
/*
* Tunable that specifies how far in the past we want to look when trying to
* estimate the incoming log blocks for the current TXG.
*
* Setting this too high may not only increase runtime but also minimize the
* effect of the incoming rates from the most recent TXGs as we take the
* average over all the blocks that we walk
* [see spa_estimate_incoming_log_blocks].
*/
static unsigned long zfs_max_log_walking = 5;
/*
* This tunable exists solely for testing purposes. It ensures that the log
* spacemaps are not flushed and destroyed during export in order for the
* relevant log spacemap import code paths to be tested (effectively simulating
* a crash).
*/
int zfs_keep_log_spacemaps_at_export = 0;
static uint64_t
spa_estimate_incoming_log_blocks(spa_t *spa)
{
ASSERT3U(spa_sync_pass(spa), ==, 1);
uint64_t steps = 0, sum = 0;
for (spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
sls != NULL && steps < zfs_max_log_walking;
sls = AVL_PREV(&spa->spa_sm_logs_by_txg, sls)) {
if (sls->sls_txg == spa_syncing_txg(spa)) {
/*
* skip the log created in this TXG as this would
* make our estimations inaccurate.
*/
continue;
}
sum += sls->sls_nblocks;
steps++;
}
return ((steps > 0) ? DIV_ROUND_UP(sum, steps) : 0);
}
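A worked example of the averaging done in spa_estimate_incoming_log_blocks() above, assuming the default zfs_max_log_walking of 5 and hypothetical per-log block counts (standalone sketch, not OpenZFS code):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int
main(void)
{
        /* hypothetical block counts of the five most recent closed logs */
        uint64_t nblocks[] = { 12, 9, 14, 10, 8 };
        uint64_t sum = 0, steps;

        for (steps = 0; steps < 5; steps++)
                sum += nblocks[steps];

        /* 53 blocks over 5 logs -> an estimate of 11 incoming blocks */
        printf("%llu\n", (unsigned long long)EXAMPLE_DIV_ROUND_UP(sum, steps));
        return (0);
}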
uint64_t
spa_log_sm_blocklimit(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_blocklimit);
}
void
spa_log_sm_set_blocklimit(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
ASSERT0(spa_log_sm_blocklimit(spa));
return;
}
uint64_t msdcount = 0;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e))
msdcount += e->lse_msdcount;
uint64_t limit = msdcount * zfs_unflushed_log_block_pct / 100;
spa->spa_unflushed_stats.sus_blocklimit = MIN(MAX(limit,
zfs_unflushed_log_block_min), zfs_unflushed_log_block_max);
}
uint64_t
spa_log_sm_nblocks(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_nblocks);
}
/*
* Ensure that the in-memory log space map structures and the summary
* have the same block and metaslab counts.
*/
static void
spa_log_summary_verify_counts(spa_t *spa)
{
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
if ((zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) == 0)
return;
uint64_t ms_in_avl = avl_numnodes(&spa->spa_metaslabs_by_flushed);
uint64_t ms_in_summary = 0, blk_in_summary = 0;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e)) {
ms_in_summary += e->lse_mscount;
blk_in_summary += e->lse_blkcount;
}
uint64_t ms_in_logs = 0, blk_in_logs = 0;
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
ms_in_logs += sls->sls_mscount;
blk_in_logs += sls->sls_nblocks;
}
VERIFY3U(ms_in_logs, ==, ms_in_summary);
VERIFY3U(ms_in_logs, ==, ms_in_avl);
VERIFY3U(blk_in_logs, ==, blk_in_summary);
VERIFY3U(blk_in_logs, ==, spa_log_sm_nblocks(spa));
}
static boolean_t
summary_entry_is_full(spa_t *spa, log_summary_entry_t *e, uint64_t txg)
{
if (e->lse_end == txg)
return (0);
if (e->lse_txgcount >= DIV_ROUND_UP(zfs_unflushed_log_txg_max,
zfs_max_logsm_summary_length))
return (1);
uint64_t blocks_per_row = MAX(1,
DIV_ROUND_UP(spa_log_sm_blocklimit(spa),
zfs_max_logsm_summary_length));
return (blocks_per_row <= e->lse_blkcount);
}
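A worked example of the per-row caps computed in summary_entry_is_full() above, using the default tunables and an assumed block limit of 4000 (standalone sketch, not OpenZFS code): each summary row then covers at most 100 TXGs or 400 log blocks, whichever fills up first.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int
main(void)
{
        uint64_t txg_max = 1000;        /* zfs_unflushed_log_txg_max */
        uint64_t rows = 10;             /* zfs_max_logsm_summary_length */
        uint64_t blocklimit = 4000;     /* assumed for illustration */

        printf("txgs per row:   %llu\n",
            (unsigned long long)EXAMPLE_DIV_ROUND_UP(txg_max, rows));
        printf("blocks per row: %llu\n",
            (unsigned long long)EXAMPLE_DIV_ROUND_UP(blocklimit, rows));
        return (0);
}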
/*
* Update the log summary information to reflect the fact that a metaslab
* was flushed or destroyed (e.g. due to device removal or pool export/destroy).
*
* We typically flush the oldest flushed metaslab so the first (and oldest)
* entry of the summary is updated. However if that metaslab is getting loaded
* we may flush the second oldest one which may be part of an entry later in
* the summary. Moreover, if we call into this function from metaslab_fini()
* the metaslabs probably won't be ordered by ms_unflushed_txg. Thus we ask
* for a txg as an argument so we can locate the appropriate summary entry for
* the metaslab.
*/
void
spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg, boolean_t dirty)
{
/*
* We don't track summary data for read-only pools and this function
* can be called from metaslab_fini(). In that case return immediately.
*/
if (!spa_writeable(spa))
return;
log_summary_entry_t *target = NULL;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_next(&spa->spa_log_summary, e)) {
if (e->lse_start > txg)
break;
target = e;
}
if (target == NULL || target->lse_mscount == 0) {
/*
* We didn't find a summary entry for this metaslab. We must be
* at the teardown of a spa_load() attempt that got an error
* while reading the log space maps.
*/
VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
return;
}
target->lse_mscount--;
if (dirty)
target->lse_msdcount--;
}
/*
* Update the log summary information to reflect the fact that we destroyed
* old log space maps. Since we can only destroy the oldest log space maps,
* we decrement the block count of the oldest summary entry and potentially
* destroy it when that count hits 0.
*
* This function is called after a metaslab is flushed and typically that
* metaslab is the oldest flushed, which means that this function will
* typically decrement the block count of the first entry of the summary and
* potentially free it if the block count gets to zero (its metaslab count
* should be zero too at that point).
*
* There are certain scenarios though that don't work exactly like that so we
* need to account for them:
*
* Scenario [1]: It is possible that after we flushed the oldest flushed
* metaslab and we destroyed the oldest log space map, more recent logs had 0
* metaslabs pointing to them so we got rid of them too. This can happen due
* to metaslabs being destroyed through device removal, or because the oldest
* flushed metaslab was loading but we kept flushing more recently flushed
* metaslabs due to the memory pressure of unflushed changes. Because of that,
* we always iterate from the beginning of the summary and if blocks_gone is
* bigger than the block_count of the current entry we free that entry (we
* expect its metaslab count to be zero), we decrement blocks_gone and move on
* to the next entry, repeating this procedure until blocks_gone gets decremented
* to 0. Doing this also works for the typical case mentioned above.
*
* Scenario [2]: The oldest flushed metaslab isn't necessarily accounted by
* the first (and oldest) entry in the summary. If the first few entries of
* the summary were only accounting metaslabs from a device that was just
* removed, then the current oldest flushed metaslab could be accounted by an
* entry somewhere in the middle of the summary. Moreover flushing that
* metaslab will destroy all the log space maps older than its ms_unflushed_txg
* because they became obsolete after the removal. Thus, iterating as we did
* for scenario [1] works out for this case too.
*
* Scenario [3]: At times we decide to flush all the metaslabs in the pool
* in one TXG (either because we are exporting the pool or because our flushing
* heuristics decided to do so). When that happens all the log space maps get
* destroyed except the one created for the current TXG which doesn't have
* any log blocks yet. As log space maps get destroyed with every metaslab that
* we flush, entries in the summary are also destroyed. This brings a weird
* corner-case when we flush the last metaslab and the log space map of the
* current TXG is in the same summary entry with other log space maps that
* are older. When that happens we are eventually left with this one last
* summary entry whose blocks are gone (blocks_gone equals the entry's block
* count) but its metaslab count is non-zero (because it accounts all the
* metaslabs in the pool as they all got flushed). Under this scenario we can't
* free this last summary entry as it's referencing all the metaslabs in the
* pool and its block count will get incremented at the end of this sync (when
* we close the syncing log space map). Thus we just decrement its current
* block count and leave it alone. In the case that the pool gets exported,
* its metaslab count will be decremented over time as we call metaslab_fini()
* for all the metaslabs in the pool and the entry will be freed at
* spa_unload_log_sm_metadata().
*/
void
spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
{
log_summary_entry_t *e = list_head(&spa->spa_log_summary);
if (e->lse_txgcount > 0)
e->lse_txgcount--;
for (; e != NULL; e = list_head(&spa->spa_log_summary)) {
if (e->lse_blkcount > blocks_gone) {
e->lse_blkcount -= blocks_gone;
blocks_gone = 0;
break;
} else if (e->lse_mscount == 0) {
/* remove obsolete entry */
blocks_gone -= e->lse_blkcount;
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
} else {
/* Verify that this is scenario [3] mentioned above. */
VERIFY3U(blocks_gone, ==, e->lse_blkcount);
/*
* Assert that this is scenario [3] further by ensuring
* that this is the only entry in the summary.
*/
VERIFY3P(e, ==, list_tail(&spa->spa_log_summary));
ASSERT3P(e, ==, list_head(&spa->spa_log_summary));
blocks_gone = e->lse_blkcount = 0;
break;
}
}
/*
* Ensure that there is no way we are trying to remove more blocks
* than the # of blocks in the summary.
*/
ASSERT0(blocks_gone);
}
void
spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg)
{
spa_log_sm_t target = { .sls_txg = txg };
spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
&target, NULL);
if (sls == NULL) {
/*
* We must be at the teardown of a spa_load() attempt that
* got an error while reading the log space maps.
*/
VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
return;
}
ASSERT(sls->sls_mscount > 0);
sls->sls_mscount--;
}
void
spa_log_sm_increment_current_mscount(spa_t *spa)
{
spa_log_sm_t *last_sls = avl_last(&spa->spa_sm_logs_by_txg);
ASSERT3U(last_sls->sls_txg, ==, spa_syncing_txg(spa));
last_sls->sls_mscount++;
}
static void
summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed,
uint64_t metaslabs_dirty, uint64_t nblocks)
{
log_summary_entry_t *e = list_tail(&spa->spa_log_summary);
if (e == NULL || summary_entry_is_full(spa, e, txg)) {
e = kmem_zalloc(sizeof (log_summary_entry_t), KM_SLEEP);
e->lse_start = e->lse_end = txg;
e->lse_txgcount = 1;
list_insert_tail(&spa->spa_log_summary, e);
}
ASSERT3U(e->lse_start, <=, txg);
if (e->lse_end < txg) {
e->lse_end = txg;
e->lse_txgcount++;
}
e->lse_mscount += metaslabs_flushed;
e->lse_msdcount += metaslabs_dirty;
e->lse_blkcount += nblocks;
}
static void
spa_log_summary_add_incoming_blocks(spa_t *spa, uint64_t nblocks)
{
summary_add_data(spa, spa_syncing_txg(spa), 0, 0, nblocks);
}
void
spa_log_summary_add_flushed_metaslab(spa_t *spa, boolean_t dirty)
{
summary_add_data(spa, spa_syncing_txg(spa), 1, dirty ? 1 : 0, 0);
}
void
spa_log_summary_dirty_flushed_metaslab(spa_t *spa, uint64_t txg)
{
log_summary_entry_t *target = NULL;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_next(&spa->spa_log_summary, e)) {
if (e->lse_start > txg)
break;
target = e;
}
ASSERT3P(target, !=, NULL);
ASSERT3U(target->lse_mscount, !=, 0);
target->lse_msdcount++;
}
/*
* This function attempts to estimate how many metaslabs we should
* flush to satisfy our block heuristic for the log spacemap
* for the upcoming TXGs.
*
* Specifically, it first tries to estimate the number of incoming
* blocks in this TXG. Then by projecting that incoming rate to
* future TXGs and using the log summary, it figures out how many
* flushes we would need to do for future TXGs individually to
* stay below our block limit and returns the maximum number of
* flushes from those estimates.
*/
static uint64_t
spa_estimate_metaslabs_to_flush(spa_t *spa)
{
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_log_sm_blocklimit(spa) != 0);
/*
* This variable contains the incoming rate that will be projected
* and used for our flushing estimates in the future.
*/
uint64_t incoming = spa_estimate_incoming_log_blocks(spa);
/*
* At any point in time this variable tells us how many
* TXGs in the future we are so we can make our estimations.
*/
uint64_t txgs_in_future = 1;
/*
* This variable tells us how much room we have until we hit
* our limit. When it goes negative, it means that we've exceeded
* our limit and we need to flush.
*
* Note that since we start at the first TXG in the future (i.e.
* txgs_in_future starts from 1) we already decrement this
* variable by the incoming rate.
*/
int64_t available_blocks =
spa_log_sm_blocklimit(spa) - spa_log_sm_nblocks(spa) - incoming;
int64_t available_txgs = zfs_unflushed_log_txg_max;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e))
available_txgs -= e->lse_txgcount;
/*
* This variable tells us the total number of flushes needed to
* keep the log size within the limit when we reach txgs_in_future.
*/
uint64_t total_flushes = 0;
/* Holds the current maximum of our estimates so far. */
uint64_t max_flushes_pertxg = zfs_min_metaslabs_to_flush;
/*
* For our estimations we only look as far in the future
* as the summary allows us.
*/
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e)) {
/*
* If there is still room before we exceed our limit
* then keep skipping TXGs accumulating more blocks
* based on the incoming rate until we exceed it.
*/
if (available_blocks >= 0 && available_txgs >= 0) {
uint64_t skip_txgs = MIN(available_txgs + 1,
(available_blocks / incoming) + 1);
available_blocks -= (skip_txgs * incoming);
available_txgs -= skip_txgs;
txgs_in_future += skip_txgs;
ASSERT3S(available_blocks, >=, -incoming);
ASSERT3S(available_txgs, >=, -1);
}
/*
* At this point we're far enough into the future where
* the limit was just exceeded and we flush metaslabs
* based on the current entry in the summary, updating
* our available_blocks.
*/
ASSERT(available_blocks < 0 || available_txgs < 0);
available_blocks += e->lse_blkcount;
available_txgs += e->lse_txgcount;
total_flushes += e->lse_msdcount;
/*
* Keep the running maximum of the total_flushes that
* we've done so far over the number of TXGs in the
* future that we are. The idea here is to estimate
* the average number of flushes that we should do
* every TXG so that when we are that many TXGs in the
* future we stay under the limit.
*/
max_flushes_pertxg = MAX(max_flushes_pertxg,
DIV_ROUND_UP(total_flushes, txgs_in_future));
}
return (max_flushes_pertxg);
}
uint64_t
spa_log_sm_memused(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_memused);
}
static boolean_t
spa_log_exceeds_memlimit(spa_t *spa)
{
if (spa_log_sm_memused(spa) > zfs_unflushed_max_mem_amt)
return (B_TRUE);
uint64_t system_mem_allowed = ((physmem * PAGESIZE) *
zfs_unflushed_max_mem_ppm) / 1000000;
if (spa_log_sm_memused(spa) > system_mem_allowed)
return (B_TRUE);
return (B_FALSE);
}
boolean_t
spa_flush_all_logs_requested(spa_t *spa)
{
return (spa->spa_log_flushall_txg != 0);
}
void
spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
if (spa_sync_pass(spa) != 1)
return;
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
/*
* If we don't have any metaslabs with unflushed changes
* return immediately.
*/
if (avl_numnodes(&spa->spa_metaslabs_by_flushed) == 0)
return;
/*
* During SPA export we leave a few empty TXGs to go by [see
* spa_final_dirty_txg() to understand why]. For this specific
* case, it is important to not flush any metaslabs as that
* would dirty this TXG.
*
* That said, during one of these dirty TXGs that is less than or
* equal to spa_final_dirty(), spa_unload() will request that
* we try to flush all the metaslabs for that TXG before
* exporting the pool, thus we ensure that we didn't get a
* request to flush everything before we attempt to return
* immediately.
*/
if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(spa_meta_objset(spa), txg) &&
!spa_flush_all_logs_requested(spa))
return;
/*
* We need to generate a log space map before flushing because this
* will set up the in-memory data (i.e. node in spa_sm_logs_by_txg)
* for this TXG's flushed metaslab count (aka sls_mscount which is
* manipulated in many ways down the metaslab_flush() codepath).
*
* That is not to say that we would generate a log space map when we
* don't need it. If we are flushing metaslabs, that means that we
* were going to write changes to disk anyway, so even if we were
* not flushing, a log space map would have been created anyway in
* metaslab_sync().
*/
spa_generate_syncing_log_sm(spa, tx);
/*
* This variable tells us how many metaslabs we want to flush based
* on the block-heuristic of our flushing algorithm (see block comment
* of log space map feature). We also decrement this as we flush
* metaslabs and attempt to destroy old log space maps.
*/
uint64_t want_to_flush;
if (spa_flush_all_logs_requested(spa)) {
ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
want_to_flush = UINT64_MAX;
} else {
want_to_flush = spa_estimate_metaslabs_to_flush(spa);
}
/* Used purely for verification purposes */
uint64_t visited = 0;
/*
* Ideally we would only iterate through spa_metaslabs_by_flushed
* using only one variable (curr). We can't do that because
* metaslab_flush() mutates position of curr in the AVL when
* it flushes that metaslab by moving it to the end of the tree.
* Thus we always keep track of the original next node of the
* current node (curr) in another variable (next).
*/
metaslab_t *next = NULL;
for (metaslab_t *curr = avl_first(&spa->spa_metaslabs_by_flushed);
curr != NULL; curr = next) {
next = AVL_NEXT(&spa->spa_metaslabs_by_flushed, curr);
/*
* If this metaslab has been flushed this txg then we've done
* a full circle over the metaslabs.
*/
if (metaslab_unflushed_txg(curr) == txg)
break;
/*
* If we are done flushing for the block heuristic and the
* unflushed changes don't exceed the memory limit just stop.
*/
if (want_to_flush == 0 && !spa_log_exceeds_memlimit(spa))
break;
if (metaslab_unflushed_dirty(curr)) {
mutex_enter(&curr->ms_sync_lock);
mutex_enter(&curr->ms_lock);
metaslab_flush(curr, tx);
mutex_exit(&curr->ms_lock);
mutex_exit(&curr->ms_sync_lock);
if (want_to_flush > 0)
want_to_flush--;
} else
metaslab_unflushed_bump(curr, tx, B_FALSE);
visited++;
}
ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=, visited);
spa_log_sm_set_blocklimit(spa);
}
/*
* Close the log space map for this TXG and update the block counts
* for the log's in-memory structure and the summary.
*/
void
spa_sync_close_syncing_log_sm(spa_t *spa)
{
if (spa_syncing_log_sm(spa) == NULL)
return;
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
ASSERT3U(sls->sls_txg, ==, spa_syncing_txg(spa));
sls->sls_nblocks = space_map_nblocks(spa_syncing_log_sm(spa));
spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
/*
* Note that we can't assert that sls_mscount is not 0,
* because there is the case where the first metaslab
* in spa_metaslabs_by_flushed is loading and we were
* not able to flush any metaslabs in the current TXG.
*/
ASSERT(sls->sls_nblocks != 0);
spa_log_summary_add_incoming_blocks(spa, sls->sls_nblocks);
spa_log_summary_verify_counts(spa);
space_map_close(spa->spa_syncing_log_sm);
spa->spa_syncing_log_sm = NULL;
/*
* At this point we tried to flush as many metaslabs as we
* can as the pool is getting exported. Reset the "flush all"
* so the last few TXGs before closing the pool can be empty
* (e.g. not dirty).
*/
if (spa_flush_all_logs_requested(spa)) {
ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
spa->spa_log_flushall_txg = 0;
}
}
void
spa_cleanup_old_sm_logs(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(spa);
uint64_t spacemap_zap;
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
return;
}
VERIFY0(error);
metaslab_t *oldest = avl_first(&spa->spa_metaslabs_by_flushed);
uint64_t oldest_flushed_txg = metaslab_unflushed_txg(oldest);
/* Free all log space maps older than the oldest_flushed_txg. */
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls && sls->sls_txg < oldest_flushed_txg;
sls = avl_first(&spa->spa_sm_logs_by_txg)) {
ASSERT0(sls->sls_mscount);
avl_remove(&spa->spa_sm_logs_by_txg, sls);
space_map_free_obj(mos, sls->sls_sm_obj, tx);
VERIFY0(zap_remove_int(mos, spacemap_zap, sls->sls_txg, tx));
spa_log_summary_decrement_blkcount(spa, sls->sls_nblocks);
spa->spa_unflushed_stats.sus_nblocks -= sls->sls_nblocks;
kmem_free(sls, sizeof (spa_log_sm_t));
}
}
static spa_log_sm_t *
spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg)
{
spa_log_sm_t *sls = kmem_zalloc(sizeof (*sls), KM_SLEEP);
sls->sls_sm_obj = sm_obj;
sls->sls_txg = txg;
return (sls);
}
void
spa_generate_syncing_log_sm(spa_t *spa, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
objset_t *mos = spa_meta_objset(spa);
if (spa_syncing_log_sm(spa) != NULL)
return;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
uint64_t spacemap_zap;
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
error = 0;
spacemap_zap = zap_create(mos,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1,
&spacemap_zap, tx));
spa_feature_incr(spa, SPA_FEATURE_LOG_SPACEMAP, tx);
}
VERIFY0(error);
uint64_t sm_obj;
ASSERT3U(zap_lookup_int_key(mos, spacemap_zap, txg, &sm_obj),
==, ENOENT);
sm_obj = space_map_alloc(mos, zfs_log_sm_blksz, tx);
VERIFY0(zap_add_int_key(mos, spacemap_zap, txg, sm_obj, tx));
avl_add(&spa->spa_sm_logs_by_txg, spa_log_sm_alloc(sm_obj, txg));
/*
* We pass UINT64_MAX as the space map's representation size
* and SPA_MINBLOCKSHIFT as the shift, to make the space map
* accept any sorts of segments since there's no real advantage
* to being more restrictive (given that we're already going
* to be using 2-word entries).
*/
VERIFY0(space_map_open(&spa->spa_syncing_log_sm, mos, sm_obj,
0, UINT64_MAX, SPA_MINBLOCKSHIFT));
spa_log_sm_set_blocklimit(spa);
}
/*
* Find all the log space maps stored in the space map ZAP and sort
* them by their TXG in spa_sm_logs_by_txg.
*/
static int
spa_ld_log_sm_metadata(spa_t *spa)
{
int error;
uint64_t spacemap_zap;
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
error = zap_lookup(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
/* the space map ZAP doesn't exist yet */
return (0);
} else if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
"zap_lookup(DMU_POOL_DIRECTORY_OBJECT) [error %d]",
error);
return (error);
}
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa_meta_objset(spa), spacemap_zap);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t log_txg = zfs_strtonum(za.za_name, NULL);
spa_log_sm_t *sls =
spa_log_sm_alloc(za.za_first_integer, log_txg);
avl_add(&spa->spa_sm_logs_by_txg, sls);
}
zap_cursor_fini(&zc);
if (error != ENOENT) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
"zap_cursor_retrieve(spacemap_zap) [error %d]",
error);
return (error);
}
for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
m; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
spa_log_sm_t target = { .sls_txg = metaslab_unflushed_txg(m) };
spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
&target, NULL);
/*
* At this point, if sls is NULL it means that a bug occurred
* in ZFS the last time the pool was opened or earlier in the
* import code path. In general we would have placed a
* VERIFY() here, or simply let the kernel panic with a NULL
* pointer dereference when incrementing sls_mscount,
* but since this is the import code path we can be a bit more
* lenient. Thus, for DEBUG bits we always cause a panic, while
* in production we log the error and just fail the import.
*/
ASSERT(sls != NULL);
if (sls == NULL) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): bug "
"encountered: could not find log spacemap for "
"TXG %llu [error %d]",
(u_longlong_t)metaslab_unflushed_txg(m), ENOENT);
return (ENOENT);
}
sls->sls_mscount++;
}
return (0);
}
typedef struct spa_ld_log_sm_arg {
spa_t *slls_spa;
uint64_t slls_txg;
} spa_ld_log_sm_arg_t;
static int
spa_ld_log_sm_cb(space_map_entry_t *sme, void *arg)
{
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint32_t vdev_id = sme->sme_vdev;
spa_ld_log_sm_arg_t *slls = arg;
spa_t *spa = slls->slls_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/*
* If the vdev has been removed (i.e. it is indirect or a hole)
* skip this entry. The contents of this vdev have already moved
* elsewhere.
*/
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(!ms->ms_loaded);
/*
* If we have already flushed entries for this TXG to this
* metaslab's space map, then ignore it. Note that we flush
* before processing any allocations/frees for that TXG, so
* the metaslab's space map only has entries from *before*
* the unflushed TXG.
*/
if (slls->slls_txg < metaslab_unflushed_txg(ms))
return (0);
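/*
 * Replay the entry into the metaslab's unflushed range trees: an
 * allocation cancels any overlapping unflushed free and the remainder
 * is recorded as an unflushed allocation; frees are handled
 * symmetrically.
 */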
switch (sme->sme_type) {
case SM_ALLOC:
range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_frees, ms->ms_unflushed_allocs);
break;
case SM_FREE:
range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_allocs, ms->ms_unflushed_frees);
break;
default:
panic("invalid maptype_t");
break;
}
if (!metaslab_unflushed_dirty(ms)) {
metaslab_set_unflushed_dirty(ms, B_TRUE);
spa_log_summary_dirty_flushed_metaslab(spa,
metaslab_unflushed_txg(ms));
}
return (0);
}
static int
spa_ld_log_sm_data(spa_t *spa)
{
spa_log_sm_t *sls, *psls;
int error = 0;
/*
* If we are not going to do any writes there is no need
* to read the log space maps.
*/
if (!spa_writeable(spa))
return (0);
ASSERT0(spa->spa_unflushed_stats.sus_nblocks);
ASSERT0(spa->spa_unflushed_stats.sus_memused);
hrtime_t read_logs_starttime = gethrtime();
/* Prefetch the log space maps' dnodes. */
for (sls = avl_first(&spa->spa_sm_logs_by_txg); sls;
sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
dmu_prefetch(spa_meta_objset(spa), sls->sls_sm_obj,
0, 0, 0, ZIO_PRIORITY_SYNC_READ);
}
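/*
 * pn and ps track how many log space maps are currently opened for
 * prefetch and their combined on-disk length, throttling how far
 * ahead the loop below reads.
 */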
uint_t pn = 0;
uint64_t ps = 0;
psls = sls = avl_first(&spa->spa_sm_logs_by_txg);
while (sls != NULL) {
/* Prefetch up to 16 log space maps ahead; beyond the first two, stop once about 2 * dmu_prefetch_max bytes are in flight. */
if (psls != NULL && pn < 16 &&
(pn < 2 || ps < 2 * dmu_prefetch_max)) {
error = space_map_open(&psls->sls_sm,
spa_meta_objset(spa), psls->sls_sm_obj, 0,
UINT64_MAX, SPA_MINBLOCKSHIFT);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data(): "
"failed at space_map_open(obj=%llu) "
"[error %d]",
(u_longlong_t)sls->sls_sm_obj, error);
goto out;
}
dmu_prefetch(spa_meta_objset(spa), psls->sls_sm_obj,
0, 0, space_map_length(psls->sls_sm),
ZIO_PRIORITY_ASYNC_READ);
pn++;
ps += space_map_length(psls->sls_sm);
psls = AVL_NEXT(&spa->spa_sm_logs_by_txg, psls);
continue;
}
/* Load TXG log spacemap into ms_unflushed_allocs/frees. */
- cond_resched();
+ kpreempt(KPREEMPT_SYNC);
ASSERT0(sls->sls_nblocks);
sls->sls_nblocks = space_map_nblocks(sls->sls_sm);
spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
summary_add_data(spa, sls->sls_txg,
sls->sls_mscount, 0, sls->sls_nblocks);
struct spa_ld_log_sm_arg vla = {
.slls_spa = spa,
.slls_txg = sls->sls_txg
};
error = space_map_iterate(sls->sls_sm,
space_map_length(sls->sls_sm), spa_ld_log_sm_cb, &vla);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data(): failed "
"at space_map_iterate(obj=%llu) [error %d]",
(u_longlong_t)sls->sls_sm_obj, error);
goto out;
}
pn--;
ps -= space_map_length(sls->sls_sm);
space_map_close(sls->sls_sm);
sls->sls_sm = NULL;
sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls);
/* Update the log block limit to account for the space map just loaded. */
spa_log_sm_set_blocklimit(spa);
}
hrtime_t read_logs_endtime = gethrtime();
spa_load_note(spa,
"read %llu log space maps (%llu total blocks - blksz = %llu bytes) "
"in %lld ms", (u_longlong_t)avl_numnodes(&spa->spa_sm_logs_by_txg),
(u_longlong_t)spa_log_sm_nblocks(spa),
(u_longlong_t)zfs_log_sm_blksz,
(longlong_t)((read_logs_endtime - read_logs_starttime) / 1000000));
out:
if (error != 0) {
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
if (sls->sls_sm) {
space_map_close(sls->sls_sm);
sls->sls_sm = NULL;
}
}
} else {
ASSERT0(pn);
ASSERT0(ps);
}
/*
* Now that the metaslabs contain their unflushed changes:
* [1] recalculate their actual allocated space
* [2] recalculate their weights
* [3] sum up the memory usage of their unflushed range trees
* [4] optionally load them, if debug_load is set
*
* Note that even in the case where we get here because of an
* error (e.g. error != 0), we still want to update the fields
* below in order to have a proper teardown in spa_unload().
*/
for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
mutex_enter(&m->ms_lock);
m->ms_allocated_space = space_map_allocated(m->ms_sm) +
range_tree_space(m->ms_unflushed_allocs) -
range_tree_space(m->ms_unflushed_frees);
vdev_t *vd = m->ms_group->mg_vd;
metaslab_space_update(vd, m->ms_group->mg_class,
range_tree_space(m->ms_unflushed_allocs), 0, 0);
metaslab_space_update(vd, m->ms_group->mg_class,
-range_tree_space(m->ms_unflushed_frees), 0, 0);
ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK);
metaslab_recalculate_weight_and_sort(m);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(m);
if (metaslab_debug_load && m->ms_sm != NULL) {
VERIFY0(metaslab_load(m));
metaslab_set_selected_txg(m, 0);
}
mutex_exit(&m->ms_lock);
}
return (error);
}
static int
spa_ld_unflushed_txgs(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
if (vd->vdev_top_zap == 0)
return (0);
uint64_t object = 0;
int error = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &object);
if (error == ENOENT)
return (0);
else if (error != 0) {
spa_load_failed(spa, "spa_ld_unflushed_txgs(): failed at "
"zap_lookup(vdev_top_zap=%llu) [error %d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
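/*
 * The object contains one metaslab_unflushed_phys_t entry per
 * metaslab, indexed by metaslab ID, recording the first TXG whose
 * changes have not yet been flushed to that metaslab's space map.
 */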
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
ASSERT(ms != NULL);
metaslab_unflushed_phys_t entry;
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
error = dmu_read(mos, object,
entry_offset, entry_size, &entry, 0);
if (error != 0) {
spa_load_failed(spa, "spa_ld_unflushed_txgs(): "
"failed at dmu_read(obj=%llu) [error %d]",
(u_longlong_t)object, error);
return (error);
}
ms->ms_unflushed_txg = entry.msp_unflushed_txg;
ms->ms_unflushed_dirty = B_FALSE;
ASSERT(range_tree_is_empty(ms->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(ms->ms_unflushed_frees));
if (ms->ms_unflushed_txg != 0) {
mutex_enter(&spa->spa_flushed_ms_lock);
avl_add(&spa->spa_metaslabs_by_flushed, ms);
mutex_exit(&spa->spa_flushed_ms_lock);
}
}
return (0);
}
/*
* Read all the log space map entries into their respective
* metaslab unflushed trees and keep them sorted by TXG in the
* SPA's metadata. In addition, setup all the metadata for the
* memory and the block heuristics.
*/
int
spa_ld_log_spacemaps(spa_t *spa)
{
int error;
spa_log_sm_set_blocklimit(spa);
for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[c];
error = spa_ld_unflushed_txgs(vd);
if (error != 0)
return (error);
}
error = spa_ld_log_sm_metadata(spa);
if (error != 0)
return (error);
/*
* Note: we don't actually expect anything to change at this point
* but we grab the config lock so we don't fail any assertions
* when using vdev_lookup_top().
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
error = spa_ld_log_sm_data(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
return (error);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_amt, ULONG, ZMOD_RW,
"Specific hard-limit in memory that ZFS allows to be used for "
"unflushed changes");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_ppm, ULONG, ZMOD_RW,
"Percentage of the overall system memory that ZFS allows to be "
"used for unflushed changes (value is calculated over 1000000 for "
"finer granularity)");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_max, ULONG, ZMOD_RW,
"Hard limit (upper-bound) in the size of the space map log "
"in terms of blocks.");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_min, ULONG, ZMOD_RW,
"Lower-bound limit for the maximum amount of blocks allowed in "
"log spacemap (see zfs_unflushed_log_block_max)");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_txg_max, ULONG, ZMOD_RW,
"Hard limit (upper-bound) in the size of the space map log "
"in terms of dirty TXGs.");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_pct, ULONG, ZMOD_RW,
"Tunable used to determine the number of blocks that can be used for "
"the spacemap log, expressed as a percentage of the total number of "
"metaslabs in the pool (e.g. 400 means the number of log blocks is "
"capped at 4 times the number of metaslabs)");
ZFS_MODULE_PARAM(zfs, zfs_, max_log_walking, ULONG, ZMOD_RW,
"The number of past TXGs that the flushing algorithm of the log "
"spacemap feature uses to estimate incoming log blocks");
ZFS_MODULE_PARAM(zfs, zfs_, keep_log_spacemaps_at_export, INT, ZMOD_RW,
"Prevent the log spacemaps from being flushed and destroyed "
"during pool export/destroy");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, max_logsm_summary_length, ULONG, ZMOD_RW,
"Maximum number of rows allowed in the summary of the spacemap log");
ZFS_MODULE_PARAM(zfs, zfs_, min_metaslabs_to_flush, ULONG, ZMOD_RW,
"Minimum number of metaslabs to flush per dirty TXG");
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index ea0245610fb7..048616c253c4 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,6077 +1,6107 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright (c) 2021, Klara Inc.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
static int zfs_embedded_slog_min_ms = 64;
/* default target for number of metaslabs per top-level vdev */
static int zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
static int zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
static int zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
static int zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
static const int zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
static unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
static unsigned int zfs_checksum_events_per_second = 20;
/*
* Ignore errors during scrub/resilver. This allows working around
* resilver issues on import when there are pool errors.
*/
static int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
-uint64_t zfs_vdev_max_auto_ashift = ASHIFT_MAX;
+/*
+ * Maximum and minimum ashift values that can be automatically set based on
+ * vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
+ * is the absolute maximum, it is intentionally capped here so as not to
+ * excessively impact pool space efficiency. Higher ashift values may still
+ * be forced by vdev logical ashift or by user via ashift property, but won't
+ * be set automatically as a performance optimization.
+ */
+uint64_t zfs_vdev_max_auto_ashift = 14;
uint64_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static const vdev_ops_t *const vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
const vdev_ops_t *ops, *const *opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
(void) vd, (void) remain_rs;
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
memcpy(newchild, pvd->vdev_child, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
vdev_cache_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
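/* A vdev is top-level when its parent is the root vdev. */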
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
vd->vdev_path = spa_strdup(vd->vdev_path);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
vd->vdev_devid = spa_strdup(vd->vdev_devid);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
&vd->vdev_physpath) == 0)
vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&vd->vdev_enc_sysfs_path) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
vd->vdev_fru = spa_strdup(vd->vdev_fru);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
&vd->vdev_noalloc);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
vdev_cache_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_noalloc = svd->vdev_noalloc;
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_noalloc = 0;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
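/*
 * Move any per-TXG dirty metaslab and DTL list entries, as well as
 * the vdev's own membership in the per-TXG vdev list, from svd to tvd.
 */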
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If pool not set for autoexpand, we need to also preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* metaslab_group_create was delayed until allocation bias was available
*/
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
* The spa ashift min/max only apply for the normal metaslab
* class. Class destination is late binding so ashift boundary
* setting had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
* group.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is marked as non-allocating then don't
* activate the metaslabs since we want to ensure that
* no allocations are performed on this device.
*/
if (vd->vdev_noalloc) {
/* track non-allocating vdev space */
spa->spa_nonallocating_dspace += spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
} else if (!expanding) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
* Even though we close the space map, we need to set its
* pointer to NULL. The reason is that vdev_metaslab_fini()
* may be called multiple times for certain operations
* (i.e. when destroying a pool) so we need to ensure that
* this clause never executes twice. This logic is similar
* to the one used for the vdev_ms clause below.
*/
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
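/*
 * The null zio is the parent probe itself; it completes only after
 * all of the label pad reads/writes above have finished, so the
 * overall probe result is evaluated here.
 */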
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
(void) vd;
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
* Compute the raidz-deflation ratio. Note, we hard-code
* in 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
* otherwise it would inconsistently account for existing bp's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
}
}
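/*
 * For illustration only (values assume a hypothetical 5-wide raidz1
 * top-level vdev with ashift=12): a 128K block maps to 32 data sectors
 * plus 8 parity sectors, so vdev_psize_to_asize(vd, 1 << 17) comes to
 * roughly 40 * 4K = 160K, and the deflate ratio becomes
 * 131072 / (163840 >> SPA_MINBLOCKSHIFT) = 131072 / 320 = 409,
 * versus 512 for a plain device with no parity overhead.
 */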
+/*
+ * Choose the better of two ashifts, preferring one that falls between the
+ * logical ashift (absolute minimum) and the administrator-defined maximum;
+ * otherwise take the bigger of the two.
+ */
+uint64_t
+vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
+{
+ if (a > logical && a <= zfs_vdev_max_auto_ashift) {
+ if (b <= logical || b > zfs_vdev_max_auto_ashift)
+ return (a);
+ else
+ return (MAX(a, b));
+ } else if (b <= logical || b > zfs_vdev_max_auto_ashift)
+ return (MAX(a, b));
+ return (b);
+}
+
/*
* Maximize performance by inflating the configured ashift for top-level
* vdevs to be as close to the physical ashift as possible while maintaining
* administrator-defined limits and ensuring it doesn't go below the
* logical ashift.
*/
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
- if (vd->vdev_ashift < vd->vdev_physical_ashift) {
+ if (vd->vdev_ashift < vd->vdev_physical_ashift &&
+ vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
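/*
 * Worked example (hypothetical tunable values, not necessarily the
 * defaults): with zfs_vdev_min_auto_ashift=9 and zfs_vdev_max_auto_ashift=14,
 * a disk reporting logical_ashift=9 and physical_ashift=12 enters
 * vdev_ashift_optimize() with vdev_ashift=9; since 9 < 12 and 12 <= 14,
 * the result is MIN(MAX(14, 9), MAX(9, 12)) = 12.  Likewise
 * vdev_best_ashift(9, 12, 16) returns 12, because 12 lies in (9, 14]
 * while 16 exceeds the administrator-defined maximum.
 */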
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/*
* Physical volume size should never be larger than its max size, unless
* the disk has shrunk while we were reading it or the device is buggy
* or damaged: either way it's not safe for use, bail out of the open.
*/
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3}, during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
* If the vdev_ashift was not overridden at creation time,
* then set it to the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
if (vd->vdev_top == vd) {
vdev_ashift_optimize(vd);
}
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Make sure the alignment required hasn't increased.
*/
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled;
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize, e.g. esize, are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
* If the config lock is not held, do not check the txg. spa_sync could
* be updating the vdev's label before updating spa_last_synced_txg.
*/
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
char *old, *new;
if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
"from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
dvd->vdev_path, svd->vdev_path);
spa_strfree(dvd->vdev_path);
dvd->vdev_path = spa_strdup(svd->vdev_path);
}
} else if (svd->vdev_path != NULL) {
dvd->vdev_path = spa_strdup(svd->vdev_path);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
(u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
}
/*
* Our enclosure sysfs path may have changed between imports
*/
old = dvd->vdev_enc_sysfs_path;
new = svd->vdev_enc_sysfs_path;
if ((old != NULL && new == NULL) ||
(old == NULL && new != NULL) ||
((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
"changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
old, new);
if (dvd->vdev_enc_sysfs_path)
spa_strfree(dvd->vdev_enc_sysfs_path);
if (svd->vdev_enc_sysfs_path) {
dvd->vdev_enc_sysfs_path = spa_strdup(
svd->vdev_enc_sysfs_path);
} else {
dvd->vdev_enc_sysfs_path = NULL;
}
}
}
/*
* Recursively copy vdev paths from one vdev to another. Source and destination
* vdev trees must have the same geometry, otherwise return an error. Intended to copy
* paths from userland config into MOS config.
*/
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
* The idea here is that while a vdev can shift positions within
* a top vdev (when replacing, attaching mirror, etc.) it cannot
* step outside of it.
*/
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
* Recursively copy vdev paths from one root vdev to another. Source and
* destination vdev trees may differ in geometry. For each destination leaf
* vdev, search a vdev with the same guid and top vdev id in the source.
* Intended to copy paths from userland config into MOS config.
*/
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
vdev_cache_purge(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* If the vdev is present, we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
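/*
 * Worked example (assuming the tunable values described in the comment
 * above: 512MB default metaslab size, 16/200 min/default counts, 16GB
 * max size): a 1 TiB top-level vdev has asize >> 29 = 2048 candidate
 * metaslabs, which exceeds the 200 target, so ms_shift becomes
 * highbit64(2^40 / 200) = 33, i.e. 128 metaslabs of 8 GiB each.
 */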
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in more than 'maxfaults' children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
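/*
 * Concretely: a three-way mirror records a txg in its DTL_MISSING only
 * when none of its three children can supply that txg, whereas its
 * DTL_PARTIAL records any txg that is missing from at least one child.
 */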
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem, but it can result in devices being tried
* which are known not to have the data; in that case, the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
(void) dva, (void) psize;
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error, all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg])
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
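/*
 * Refcount walk-through of the excision above, using hypothetical txgs:
 * suppose DTL_MISSING covers txgs [90, 120), DTL_SCRUB covers [90, 100),
 * and scrub_txg is 110.  Txgs in [90, 100) score 1 - 1 + 2 = 2, txgs in
 * [100, 110) score 1 - 1 = 0, and txgs in [110, 120) score 1; generating
 * the map with minref 1 therefore keeps [90, 100) and [110, 120) as the
 * new DTL_MISSING and drops the cleanly-scrubbed [100, 110) range.
 */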
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
/*
* If the dtl cannot be sync'd there is no need to open it.
*/
if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
return (0);
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
/*
* On spa_load path, grab the allocation bias from our zap
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If user did a 'zpool offline -f' then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here. This is triggered by l2arc_rebuild_vdev()
* asynchronously for the whole device or in l2arc_evict() as it evicts
* space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED))
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect vdev.
*/
if (!vdev_is_concrete(vd))
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we still want to post the ereport again.

*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
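/*
 * Illustrative note (not part of the original source): informally, both
 * vdev_readable() and vdev_writeable() imply !vdev_is_dead(), and
 * vdev_writeable() additionally requires the vdev to be concrete.
 * vdev_allocatable() is the odd one out because it tolerates a vdev that is
 * merely CLOSED (e.g. mid-reopen), deferring the real check until the
 * allocation path holds the proper locks.
 */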
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
(void) cvd;
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
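/*
 * Illustrative note (not part of the original source): with SM_OFFSET_BITS
 * == 47, a single-word entry on an ashift=9 vdev can describe offsets up to
 * 2^(47+9) = 64 PiB, and on an ashift=12 vdev up to 2^59 = 512 PiB, so the
 * double-word (SPACEMAP_V2) format only becomes mandatory for truly enormous
 * devices.
 */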
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
vsx->vsx_active_queue[t] =
vd->vdev_queue.vq_class[t].vqc_active;
vsx->vsx_pend_queue[t] = avl_numnodes(
&vd->vdev_queue.vq_class[t].vqc_queued_tree);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
memcpy(vs, &vd->vdev_stat, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_pspace = vd->vdev_psize;
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
- vs->vs_physical_ashift = vd->vdev_physical_ashift;
+ if (vd->vdev_physical_ashift <= ASHIFT_MAX)
+ vs->vs_physical_ashift = vd->vdev_physical_ashift;
+ else
+ vs->vs_physical_ashift = 0;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
vs->vs_noalloc = MAX(vd->vdev_noalloc,
tvd ? tvd->vdev_noalloc : 0);
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_IOCTL. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_IOCTL;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting use the priority to categorize the IO.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM,
* ZIO_PRIORITY_REBUILD.
*/
if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
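/*
 * Illustrative note (not part of the original source): vdev_deflate_ratio is
 * derived in vdev_set_deflate_ratio() as (128K / (asize-of-128K >> 9)). On a
 * plain disk that works out to 512, so the deflated value equals the raw
 * space; on a raidz vdev the ratio is smaller, discounting the parity
 * overhead so the result approximates usable (psize-equivalent) bytes.
 */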
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
(void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since vdev_offline() code path is already in an offline
* state we can miss a statechange event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also let this be if
* it is one of our special test online cases, which is only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
*
*/
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, const char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (avl_numnodes(&vq->vq_active_tree) > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %lu active IOs",
vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
/*
* Look at the head of all the pending queues,
* if any I/O has been outstanding for longer than
* the spa_deadman_synctime invoke the deadman logic.
*/
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
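/*
 * Illustrative note (not part of the original source): the threshold used
 * here, spa_deadman_synctime(), is driven by the zfs_deadman_synctime_ms
 * tunable (historically on the order of several minutes), so zio_deadman()
 * only fires for I/Os that have been outstanding far longer than any normal
 * device latency.
 */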
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev, initialize the physical
* range to the logical range and set an empty remaining
* range then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
* does not live on this leaf vdev. Only when there is a non-
* zero physical size call the provided function.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
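/*
 * Illustrative usage sketch (not part of the original source; the callback
 * and variable names below are hypothetical):
 *
 *	static void
 *	sum_phys_cb(void *arg, range_seg64_t *physical_rs)
 *	{
 *		*(uint64_t *)arg += physical_rs->rs_end - physical_rs->rs_start;
 *	}
 *
 *	uint64_t phys_bytes = 0;
 *	vdev_xlate_walk(leaf_vd, &logical_rs, sum_phys_cb, &phys_bytes);
 *
 * would accumulate the physical bytes on this leaf that back the given
 * logical range, visiting each contiguous physical segment in turn.
 */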
static char *
vdev_name(vdev_t *vd, char *buf, int buflen)
{
if (vd->vdev_path == NULL) {
if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
strlcpy(buf, vd->vdev_spa->spa_name, buflen);
} else if (!vd->vdev_ops->vdev_op_leaf) {
snprintf(buf, buflen, "%s-%llu",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id);
}
} else {
strlcpy(buf, vd->vdev_path, buflen);
}
return (buf);
}
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
vdev_prop_add_list(nvlist_t *nvl, const char *propname, char *strval,
uint64_t intval, zprop_source_t src)
{
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
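/*
 * Illustrative note (not part of the original source): each entry added here
 * has the shape
 *	propname -> { ZPROP_SOURCE = src, ZPROP_VALUE = strval or intval }
 * which is the layout the user-space property code expects when rendering
 * "zpool get <prop> <pool> <vdev>" output.
 */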
static void
vdev_props_set_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd;
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
/* this vdev could get removed while waiting for this sync task */
if (vd == NULL)
return;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
uint64_t intval, objid = 0;
char *strval;
vdev_prop_t prop;
const char *propname = nvpair_name(elem);
zprop_type_t proptype;
/*
* Set vdev property values in the vdev props mos object.
*/
if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
panic("vdev not top or leaf");
}
switch (prop = vdev_name_to_prop(propname)) {
case VDEV_PROP_USERPROP:
if (vdev_prop_user(propname)) {
strval = fnvpair_value_string(elem);
if (strlen(strval) == 0) {
/* remove the property if value == "" */
(void) zap_remove(mos, objid, propname,
tx);
} else {
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
}
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
}
break;
default:
/* normalize the property name */
propname = vdev_prop_to_name(prop);
proptype = vdev_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(vdev_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos, objid, propname,
sizeof (uint64_t), 1, &intval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%lld",
(u_longlong_t)vdev_guid,
nvpair_name(elem), (longlong_t)intval);
} else {
panic("invalid vdev property type %u",
nvpair_type(elem));
}
}
}
mutex_exit(&spa->spa_props_lock);
}
int
vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
int error;
ASSERT(vd != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
&nvprops) != 0)
return (SET_ERROR(EINVAL));
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
return (SET_ERROR(EINVAL));
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
char *propname = nvpair_name(elem);
vdev_prop_t prop = vdev_name_to_prop(propname);
uint64_t intval = 0;
char *strval = NULL;
if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
error = EINVAL;
goto end;
}
if (vdev_prop_readonly(prop)) {
error = EROFS;
goto end;
}
/* Special Processing */
switch (prop) {
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL) {
error = EROFS;
break;
}
if (nvpair_value_string(elem, &strval) != 0) {
error = EINVAL;
break;
}
/* New path must start with /dev/ */
if (strncmp(strval, "/dev/", 5)) {
error = EINVAL;
break;
}
error = spa_vdev_setpath(spa, vdev_guid, strval);
break;
case VDEV_PROP_ALLOCATING:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
if (intval != vd->vdev_noalloc)
break;
if (intval == 0)
error = spa_vdev_noalloc(spa, vdev_guid);
else
error = spa_vdev_alloc(spa, vdev_guid);
break;
default:
/* Most processing is done in vdev_props_set_sync */
break;
}
end:
if (error != 0) {
intval = error;
vdev_prop_add_list(outnvl, propname, strval, intval, 0);
return (error);
}
}
return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
int
vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int err = 0;
uint64_t objid;
uint64_t vdev_guid;
nvpair_t *elem = NULL;
nvlist_t *nvprops = NULL;
uint64_t intval = 0;
char *strval = NULL;
const char *propname = NULL;
vdev_prop_t prop;
ASSERT(vd != NULL);
ASSERT(mos != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (SET_ERROR(EINVAL));
}
ASSERT(objid != 0);
mutex_enter(&spa->spa_props_lock);
if (nvprops != NULL) {
char namebuf[64] = { 0 };
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
intval = 0;
strval = NULL;
propname = nvpair_name(elem);
prop = vdev_name_to_prop(propname);
zprop_source_t src = ZPROP_SRC_DEFAULT;
uint64_t integer_size, num_integers;
switch (prop) {
/* Special Read-only Properties */
case VDEV_PROP_NAME:
strval = vdev_name(vd, namebuf,
sizeof (namebuf));
if (strval == NULL)
continue;
vdev_prop_add_list(outnvl, propname, strval, 0,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CAPACITY:
/* percent used */
intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
(vd->vdev_stat.vs_alloc * 100 /
vd->vdev_stat.vs_dspace);
vdev_prop_add_list(outnvl, propname, NULL,
intval, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_STATE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_state, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_GUID:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_guid, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_asize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PSIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_psize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASHIFT:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_ashift, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_SIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace -
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ALLOCATED:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_EXPANDSZ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRAGMENTATION:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_fragmentation,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARITY:
vdev_prop_add_list(outnvl, propname, NULL,
vdev_get_nparity(vd), ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_DEVID:
if (vd->vdev_devid == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_devid, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PHYS_PATH:
if (vd->vdev_physpath == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_physpath, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ENC_PATH:
if (vd->vdev_enc_sysfs_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRU:
if (vd->vdev_fru == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_fru, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARENT:
if (vd->vdev_parent != NULL) {
strval = vdev_name(vd->vdev_parent,
namebuf, sizeof (namebuf));
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
}
continue;
case VDEV_PROP_CHILDREN:
if (vd->vdev_children > 0)
strval = kmem_zalloc(ZAP_MAXVALUELEN,
KM_SLEEP);
for (uint64_t i = 0; i < vd->vdev_children;
i++) {
const char *vname;
vname = vdev_name(vd->vdev_child[i],
namebuf, sizeof (namebuf));
if (vname == NULL)
vname = "(unknown)";
if (strlen(strval) > 0)
strlcat(strval, ",",
ZAP_MAXVALUELEN);
strlcat(strval, vname, ZAP_MAXVALUELEN);
}
if (strval != NULL) {
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
kmem_free(strval, ZAP_MAXVALUELEN);
}
continue;
case VDEV_PROP_NUMCHILDREN:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_children, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_READ_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_read_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_WRITE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_write_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CHECKSUM_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_checksum_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_INITIALIZE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_initialize_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_REMOVING:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_removing, ZPROP_SRC_NONE);
continue;
/* Numeric Properties */
case VDEV_PROP_ALLOCATING:
src = ZPROP_SRC_LOCAL;
strval = NULL;
err = zap_lookup(mos, objid, nvpair_name(elem),
sizeof (uint64_t), 1, &intval);
if (err == ENOENT) {
intval =
vdev_prop_default_numeric(prop);
err = 0;
} else if (err)
break;
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
/* Leaf vdevs cannot have this property */
if (vd->vdev_mg == NULL &&
vd->vdev_top != NULL) {
src = ZPROP_SRC_NONE;
intval = ZPROP_BOOLEAN_NA;
}
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
/* Text Properties */
case VDEV_PROP_COMMENT:
/* Exists in the ZAP below */
/* FALLTHRU */
case VDEV_PROP_USERPROP:
/* User Properties */
src = ZPROP_SRC_LOCAL;
err = zap_length(mos, objid, nvpair_name(elem),
&integer_size, &num_integers);
if (err)
break;
switch (integer_size) {
case 8:
/* User properties cannot be integers */
err = EINVAL;
break;
case 1:
/* string property */
strval = kmem_alloc(num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid,
nvpair_name(elem), 1,
num_integers, strval);
if (err) {
kmem_free(strval,
num_integers);
break;
}
vdev_prop_add_list(outnvl, propname,
strval, 0, src);
kmem_free(strval, num_integers);
break;
}
break;
default:
err = ENOENT;
break;
}
if (err)
break;
}
} else {
/*
* Get all properties from the MOS vdev property object.
*/
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, objid);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
intval = 0;
strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
propname = za.za_name;
prop = vdev_name_to_prop(propname);
switch (za.za_integer_length) {
case 8:
/* We do not allow integer user properties */
/* This is likely an internal value */
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid, za.za_name, 1,
za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
vdev_prop_add_list(outnvl, propname, strval, 0,
src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
}
mutex_exit(&spa->spa_props_lock);
if (err && err != ENOENT) {
return (err);
}
return (0);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW,
"Default limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below ZED threshold).");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, INT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_ulong, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_ulong, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_draid.c b/sys/contrib/openzfs/module/zfs/vdev_draid.c
index 24034d9d9313..032e8825a297 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_draid.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_draid.c
@@ -1,2831 +1,2837 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_draid_io_verify() */
#endif
/*
* dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
* comprised of multiple raidz redundancy groups which are spread over the
* dRAID children. To ensure an even distribution, and avoid hot spots, a
* permutation mapping is applied to the order of the dRAID children.
* This mixing effectively distributes the parity columns evenly over all
* of the disks in the dRAID.
*
* This is beneficial because it means that when resilvering, all of the
* disks can participate, thereby increasing the available IOPS and
* bandwidth. Furthermore, by reserving a small fraction of each child's
* total capacity, virtual distributed spare disks can be created. These
* spares similarly benefit from the performance gains of spanning all of
* the children. The consequence is that resilvering to a distributed
* spare can substantially reduce the time required to restore full parity
* to a pool with a failed disk.
*
* === dRAID group layout ===
*
* First, let's define a "row" in the configuration to be a 16M chunk from
* each physical drive at the same offset. This is the minimum allowable
* size since it must be possible to store a full 16M block when there is
* only a single data column. Next, we define a "group" to be a set of
* sequential disks containing both the parity and data columns. We allow
* groups to span multiple rows in order to align any group size to any
* number of physical drives. Finally, a "slice" is comprised of the rows
* which contain the target number of groups. The permutation mappings
* are applied in a round robin fashion to each slice.
*
* Given D+P drives in a group (including parity drives) and C-S physical
* drives (not including the spare drives), we can distribute the groups
* across R rows without remainder by selecting the least common multiple
* of D+P and C-S as the number of groups; i.e. ngroups = LCM(D+P, C-S).
*
* In the example below, there are C=14 physical drives in the configuration
* with S=2 drives worth of spare capacity. Each group has a width of 9
* which includes D=8 data and P=1 parity drive. There are 4 groups and
* 3 rows per slice. Each group has a size of 144M (16M * 9) and a slice
* size is 576M (144M * 4). When allocating from a dRAID each group is
* filled before moving on to the next as shown in slice0 below.
*
* data disks (8 data + 1 parity) spares (2)
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* ^ | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
* | +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | | group 0 | group 1..| |
* | +-----------------------------------+-----------+-------|
* | | 0 1 2 3 4 5 6 7 8 | 36 37 38| | r
* | | 9 10 11 12 13 14 15 16 17| 45 46 47| | o
* | | 18 19 20 21 22 23 24 25 26| 54 55 56| | w
* | 27 28 29 30 31 32 33 34 35| 63 64 65| | 0
* s +-----------------------+-----------------------+-------+
* l | ..group 1 | group 2.. | |
* i +-----------------------+-----------------------+-------+
* c | 39 40 41 42 43 44| 72 73 74 75 76 77| | r
* e | 48 49 50 51 52 53| 81 82 83 84 85 86| | o
* 0 | 57 58 59 60 61 62| 90 91 92 93 94 95| | w
* | 66 67 68 69 70 71| 99 100 101 102 103 104| | 1
* | +-----------+-----------+-----------------------+-------+
* | |..group 2 | group 3 | |
* | +-----------+-----------+-----------------------+-------+
* | | 78 79 80|108 109 110 111 112 113 114 115 116| | r
* | | 87 88 89|117 118 119 120 121 122 123 124 125| | o
* | | 96 97 98|126 127 128 129 130 131 132 133 134| | w
* v |105 106 107|135 136 137 138 139 140 141 142 143| | 2
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
* s +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* l | group 4 | group 5..| | row 3
* i +-----------------------+-----------+-----------+-------|
* c | ..group 5 | group 6.. | | row 4
* e +-----------+-----------+-----------------------+-------+
* 1 |..group 6 | group 7 | | row 5
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
* s +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* l | group 8 | group 9..| | row 6
* i +-----------------------------------------------+-------|
* c | ..group 9 | group 10.. | | row 7
* e +-----------------------+-----------------------+-------+
* 2 |..group 10 | group 11 | | row 8
* +-----------+-----------------------------------+-------+
*
* This layout has several advantages over requiring that each row contain
* a whole number of groups.
*
* 1. The group count is not a relevant parameter when defining a dRAID
* layout. Only the group width is needed, and *all* groups will have
* the desired size.
*
* 2. All possible group widths (<= physical disk count) can be supported.
*
* 3. The logic within vdev_draid.c is simplified when the group width is
* the same for all groups (although some of the logic around computing
* permutation numbers and drive offsets is more complicated).
*
* N.B. The following array describes all valid dRAID permutation maps.
* Each row is used to generate a permutation map for a different number
* of children from a unique seed. The seeds were generated and carefully
* evaluated by the 'draid' utility in order to provide balanced mappings.
* In addition to the seed a checksum of the in-memory mapping is stored
* for verification.
*
* The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
* with a given permutation map) is the ratio of the amounts of I/O that will
* be sent to the least and most busy disks when resilvering. The average
* imbalance ratio (of a given number of disks and permutation map) is the
* average of the ratios of all possible single and double disk failures.
*
* In order to achieve a low imbalance ratio the number of permutations in
* the mapping must be significantly larger than the number of children.
* For dRAID the number of permutations has been limited to 512 to minimize
* the map size. This does result in a gradually increasing imbalance ratio
* as seen in the table below. Increasing the number of permutations for
* larger child counts would reduce the imbalance ratio. However, in practice
* when there are a large number of children each child is responsible for
* fewer total IOs so it's less of a concern.
*
* Note these values are hard coded and must never be changed. Existing
* pools depend on the same mapping always being generated in order to
* read and write from the correct locations. Any change would make
* existing pools completely inaccessible.
*/
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
{ 2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d }, /* 1.000 */
{ 3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 }, /* 1.000 */
{ 4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 }, /* 1.000 */
{ 5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 }, /* 1.010 */
{ 6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 }, /* 1.031 */
{ 7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee }, /* 1.043 */
{ 8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 }, /* 1.059 */
{ 9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 }, /* 1.056 */
{ 10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 }, /* 1.072 */
{ 11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c }, /* 1.083 */
{ 12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e }, /* 1.097 */
{ 13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 }, /* 1.100 */
{ 14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 }, /* 1.121 */
{ 15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 }, /* 1.103 */
{ 16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 }, /* 1.111 */
{ 17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe }, /* 1.133 */
{ 18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 }, /* 1.131 */
{ 19, 256, 0x892e343f2f31d690, 0x00000029eb392835 }, /* 1.130 */
{ 20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c }, /* 1.141 */
{ 21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 }, /* 1.139 */
{ 22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 }, /* 1.150 */
{ 23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f }, /* 1.174 */
{ 24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 }, /* 1.168 */
{ 25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 }, /* 1.180 */
{ 26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba }, /* 1.226 */
{ 27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 }, /* 1.228 */
{ 28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c }, /* 1.217 */
{ 29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c }, /* 1.239 */
{ 30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 }, /* 1.238 */
{ 31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f }, /* 1.273 */
{ 32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 }, /* 1.191 */
{ 33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 }, /* 1.199 */
{ 34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 }, /* 1.195 */
{ 35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 }, /* 1.201 */
{ 36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef }, /* 1.194 */
{ 37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 }, /* 1.237 */
{ 38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 }, /* 1.242 */
{ 39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd }, /* 1.231 */
{ 40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 }, /* 1.233 */
{ 41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 }, /* 1.271 */
{ 42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 }, /* 1.263 */
{ 43, 512, 0xbaa5125faa781854, 0x000001c76789e278 }, /* 1.270 */
{ 44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb }, /* 1.281 */
{ 45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 }, /* 1.282 */
{ 46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b }, /* 1.286 */
{ 47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 }, /* 1.329 */
{ 48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b }, /* 1.286 */
{ 49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 }, /* 1.322 */
{ 50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 }, /* 1.335 */
{ 51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 }, /* 1.305 */
{ 52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf }, /* 1.330 */
{ 53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 }, /* 1.365 */
{ 54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 }, /* 1.334 */
{ 55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 }, /* 1.364 */
{ 56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e }, /* 1.374 */
{ 57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 }, /* 1.363 */
{ 58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 }, /* 1.401 */
{ 59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c }, /* 1.392 */
{ 60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 }, /* 1.360 */
{ 61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd }, /* 1.396 */
{ 62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c }, /* 1.453 */
{ 63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 }, /* 1.437 */
{ 64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 }, /* 1.402 */
{ 65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 }, /* 1.459 */
{ 66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 }, /* 1.423 */
{ 67, 512, 0x910b9714f698a877, 0x00000451ea65d5db }, /* 1.447 */
{ 68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 }, /* 1.450 */
{ 69, 512, 0x836d4968fbaa3706, 0x000004954068a380 }, /* 1.455 */
{ 70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d }, /* 1.463 */
{ 71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 }, /* 1.463 */
{ 72, 512, 0x42763a680d5bed8e, 0x000005084275c680 }, /* 1.452 */
{ 73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab }, /* 1.498 */
{ 74, 512, 0x9fa08548b1621a44, 0x0000054708019247 }, /* 1.526 */
{ 75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 }, /* 1.491 */
{ 76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 }, /* 1.470 */
{ 77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 }, /* 1.527 */
{ 78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 }, /* 1.509 */
{ 79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e }, /* 1.569 */
{ 80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c }, /* 1.555 */
{ 81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 }, /* 1.509 */
{ 82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 }, /* 1.596 */
{ 83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e }, /* 1.568 */
{ 84, 512, 0xba02545069ddc6dc, 0x000006d19861364f }, /* 1.541 */
{ 85, 512, 0x447c73192c35073e, 0x000006fce315ce35 }, /* 1.623 */
{ 86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b }, /* 1.620 */
{ 87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 }, /* 1.597 */
{ 88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b }, /* 1.575 */
{ 89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc }, /* 1.627 */
{ 90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb }, /* 1.596 */
{ 91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 }, /* 1.622 */
{ 92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e }, /* 1.695 */
{ 93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c }, /* 1.605 */
{ 94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc }, /* 1.625 */
{ 95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 }, /* 1.687 */
{ 96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a }, /* 1.621 */
{ 97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 }, /* 1.699 */
{ 98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b }, /* 1.688 */
{ 99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce }, /* 1.642 */
{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc }, /* 1.683 */
{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 }, /* 1.755 */
{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 }, /* 1.692 */
{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 }, /* 1.747 */
{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 }, /* 1.751 */
{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 }, /* 1.751 */
{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f }, /* 1.726 */
{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d }, /* 1.788 */
{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 }, /* 1.740 */
{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 }, /* 1.780 */
{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 }, /* 1.836 */
{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 }, /* 1.778 */
{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 }, /* 1.831 */
{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df }, /* 1.825 */
{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 }, /* 1.826 */
{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 }, /* 1.843 */
{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d }, /* 1.826 */
{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b }, /* 1.803 */
{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 }, /* 1.857 */
{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 }, /* 1.877 */
{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 }, /* 1.849 */
{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d }, /* 1.867 */
{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 }, /* 1.978 */
{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d }, /* 1.947 */
{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea }, /* 1.865 */
{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f }, /* 1.881 */
{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b }, /* 1.882 */
{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e }, /* 1.867 */
{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e }, /* 1.972 */
{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 }, /* 1.896 */
{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d }, /* 1.965 */
{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 }, /* 1.963 */
{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 }, /* 1.925 */
{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 }, /* 1.862 */
{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 }, /* 2.042 */
{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 }, /* 1.935 */
{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 }, /* 2.005 */
{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c }, /* 2.041 */
{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 }, /* 1.997 */
{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 }, /* 1.996 */
{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d }, /* 2.053 */
{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a }, /* 1.971 */
{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 }, /* 2.018 */
{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd }, /* 1.961 */
{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 }, /* 2.046 */
{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb }, /* 1.968 */
{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 }, /* 2.143 */
{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 }, /* 2.064 */
{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 }, /* 2.023 */
{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c }, /* 2.136 */
{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 }, /* 2.063 */
{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 }, /* 1.974 */
{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 }, /* 2.210 */
{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a }, /* 2.006 */
{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 }, /* 2.193 */
{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 }, /* 2.163 */
{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc }, /* 2.046 */
{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 }, /* 2.084 */
{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 }, /* 2.264 */
{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 }, /* 2.074 */
{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 }, /* 2.282 */
{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf }, /* 2.148 */
{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 }, /* 2.355 */
{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 }, /* 2.164 */
{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a }, /* 2.393 */
{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 }, /* 2.178 */
{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc }, /* 2.334 */
{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b }, /* 2.266 */
{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 }, /* 2.304 */
{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d }, /* 2.218 */
{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff }, /* 2.377 */
{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 }, /* 2.155 */
{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 }, /* 2.404 */
{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 }, /* 2.205 */
{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d }, /* 2.359 */
{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 }, /* 2.158 */
{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b }, /* 2.614 */
{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc }, /* 2.239 */
{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc }, /* 2.493 */
{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c }, /* 2.327 */
{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 }, /* 2.231 */
{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c }, /* 2.237 */
{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 }, /* 2.691 */
{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e }, /* 2.170 */
{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 }, /* 2.600 */
{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc }, /* 2.391 */
{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 }, /* 2.677 */
{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c }, /* 2.410 */
{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 }, /* 2.776 */
{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 }, /* 2.266 */
{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 }, /* 2.717 */
{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c }, /* 2.474 */
{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 }, /* 2.673 */
{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 }, /* 2.420 */
{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 }, /* 2.898 */
{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c }, /* 2.363 */
{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e }, /* 2.747 */
{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 }, /* 2.531 */
{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 }, /* 2.707 */
{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 }, /* 2.315 */
{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf }, /* 3.012 */
{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 }, /* 2.378 */
{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 }, /* 2.969 */
{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d }, /* 2.594 */
{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd }, /* 2.763 */
{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 }, /* 2.457 */
{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 }, /* 3.057 */
{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 }, /* 2.590 */
{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b }, /* 3.047 */
{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 }, /* 2.676 */
{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 }, /* 2.993 */
{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 }, /* 2.457 */
{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 }, /* 3.182 */
{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 }, /* 2.563 */
{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 }, /* 3.025 */
{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f }, /* 2.730 */
{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 }, /* 3.036 */
{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 }, /* 2.722 */
{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 }, /* 3.356 */
{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 }, /* 2.697 */
{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 }, /* 2.979 */
{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 }, /* 2.858 */
{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e }, /* 3.258 */
{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 }, /* 2.693 */
{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 }, /* 3.259 */
{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c }, /* 2.733 */
{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 }, /* 3.235 */
{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 }, /* 2.983 */
{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e }, /* 3.308 */
{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 }, /* 2.715 */
{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f }, /* 3.540 */
{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 }, /* 2.779 */
{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c }, /* 3.084 */
{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc }, /* 2.987 */
{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae }, /* 3.341 */
{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 }, /* 2.793 */
{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 }, /* 3.518 */
{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 }, /* 2.962 */
{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 }, /* 3.196 */
{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 }, /* 2.914 */
{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 }, /* 3.408 */
{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 }, /* 2.903 */
{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 }, /* 3.778 */
{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c }, /* 3.026 */
{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d }, /* 3.347 */
{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d }, /* 3.212 */
{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 }, /* 3.482 */
{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 }, /* 3.146 */
{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f }, /* 3.626 */
{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 }, /* 2.952 */
{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e }, /* 3.463 */
{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 }, /* 3.131 */
{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c }, /* 3.538 */
{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac }, /* 2.974 */
{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 }, /* 3.843 */
{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 }, /* 3.088 */
};
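/*
 * Editor's illustrative sketch, not part of vdev_draid.c: a standalone,
 * user-space worked example of the group geometry described in the dRAID
 * layout comment above (C=14 children, S=2 spares, D=8 data, P=1 parity).
 * All names below are hypothetical; only the arithmetic mirrors the text.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
example_gcd(uint64_t a, uint64_t b)
{
	while (b != 0) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return (a);
}

int
main(void)
{
	const uint64_t C = 14, S = 2, D = 8, P = 1;
	const uint64_t rowheight = 16ULL << 20;		/* 16M per row */
	uint64_t width = D + P;				/* 9 */
	uint64_t ndisks = C - S;			/* 12 */
	uint64_t cells = width * ndisks / example_gcd(width, ndisks);
	uint64_t groups_per_slice = cells / width;	/* 4 */
	uint64_t rows_per_slice = cells / ndisks;	/* 3 */
	uint64_t groupsz = rowheight * width;		/* 144M */
	uint64_t slicesz = groupsz * groups_per_slice;	/* 576M */

	printf("groups/slice=%ju rows/slice=%ju groupsz=%juM slicesz=%juM\n",
	    (uintmax_t)groups_per_slice, (uintmax_t)rows_per_slice,
	    (uintmax_t)(groupsz >> 20), (uintmax_t)(slicesz >> 20));
	return (0);
}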
/*
* Verify the map is valid. Each device index must appear exactly
* once in every row, and the permutation array checksum must match.
*/
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
uint64_t checksum)
{
int countssz = sizeof (uint16_t) * children;
uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);
for (int i = 0; i < nperms; i++) {
for (int j = 0; j < children; j++) {
uint8_t val = perms[(i * children) + j];
if (val >= children || counts[val] != i) {
kmem_free(counts, countssz);
return (EINVAL);
}
counts[val]++;
}
}
if (checksum != 0) {
int permssz = sizeof (uint8_t) * children * nperms;
zio_cksum_t cksum;
fletcher_4_native_varsize(perms, permssz, &cksum);
if (checksum != cksum.zc_word[0]) {
kmem_free(counts, countssz);
return (ECKSUM);
}
}
kmem_free(counts, countssz);
return (0);
}
/*
* Generate the permutation array for the draid_map_t. These maps control
* the placement of all data in a dRAID. Therefore it's critical that the
* seed always generates the same mapping. We provide our own pseudo-random
* number generator for this purpose.
*/
int
vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
{
VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
VERIFY3U(map->dm_seed, !=, 0);
VERIFY3U(map->dm_nperms, !=, 0);
VERIFY3P(map->dm_perms, ==, NULL);
#ifdef _KERNEL
/*
* The kernel code always provides both a map_seed and checksum.
* Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
* a zero checksum when generating new candidate maps.
*/
VERIFY3U(map->dm_checksum, !=, 0);
#endif
uint64_t children = map->dm_children;
uint64_t nperms = map->dm_nperms;
int rowsz = sizeof (uint8_t) * children;
int permssz = rowsz * nperms;
uint8_t *perms;
/* Allocate the permutation array */
perms = vmem_alloc(permssz, KM_SLEEP);
/* Setup an initial row with a known pattern */
uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
for (int i = 0; i < children; i++)
initial_row[i] = i;
uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
uint8_t *current_row, *previous_row = initial_row;
/*
* Perform a Fisher-Yates shuffle of each row using the previous
* row as the starting point. An initial_row with known pattern
* is used as the input for the first row.
*/
for (int i = 0; i < nperms; i++) {
current_row = &perms[i * children];
memcpy(current_row, previous_row, rowsz);
for (int j = children - 1; j > 0; j--) {
uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
uint8_t val = current_row[j];
current_row[j] = current_row[k];
current_row[k] = val;
}
previous_row = current_row;
}
kmem_free(initial_row, rowsz);
int error = verify_perms(perms, children, nperms, map->dm_checksum);
if (error) {
vmem_free(perms, permssz);
return (error);
}
*permsp = perms;
return (0);
}
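/*
 * Editor's illustrative sketch, not part of vdev_draid.c: the property
 * vdev_draid_generate_perms() depends on is that a Fisher-Yates shuffle
 * driven by a deterministic generator reproduces the exact same rows for
 * the same seed. The generator below (splitmix64) is only a stand-in for
 * illustration; it is NOT vdev_draid_rand() and does not reproduce the
 * on-disk permutation maps.
 */
#include <stdint.h>
#include <string.h>
#include <assert.h>

static uint64_t
example_rand(uint64_t *state)
{
	uint64_t z = (*state += 0x9e3779b97f4a7c15ULL);
	z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
	z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
	return (z ^ (z >> 31));
}

static void
example_shuffle(uint8_t *row, int children, uint64_t *state)
{
	/* Same swap pattern as the loop in vdev_draid_generate_perms(). */
	for (int j = children - 1; j > 0; j--) {
		uint64_t k = example_rand(state) % (j + 1);
		uint8_t val = row[j];
		row[j] = row[k];
		row[k] = val;
	}
}

int
main(void)
{
	uint8_t a[11], b[11];
	uint64_t s1 = 0x2a, s2 = 0x2a;	/* identical seeds */

	for (int i = 0; i < 11; i++)
		a[i] = b[i] = i;
	example_shuffle(a, 11, &s1);
	example_shuffle(b, 11, &s2);
	assert(memcmp(a, b, sizeof (a)) == 0);	/* same seed, same row */
	return (0);
}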
/*
* Lookup the fixed draid_map_t for the requested number of children.
*/
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
- for (int i = 0; i <= VDEV_DRAID_MAX_MAPS; i++) {
+ for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
if (draid_maps[i].dm_children == children) {
*mapp = &draid_maps[i];
return (0);
}
}
return (ENOENT);
}
/*
* Lookup the permutation array and iteration id for the provided offset.
*/
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
uint8_t **base, uint64_t *iter)
{
uint64_t ncols = vdc->vdc_children;
uint64_t poff = pindex % (vdc->vdc_nperms * ncols);
*base = vdc->vdc_perms + (poff / ncols) * ncols;
*iter = poff % ncols;
}
static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
uint8_t *base, uint64_t iter, uint64_t index)
{
return ((base[index] + iter) % vdc->vdc_children);
}
/*
* Return the asize, which is the psize rounded up to a full group width;
* i.e. vdev_draid_psize_to_asize().
*/
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_ashift;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
ASSERT3U(asize, !=, 0);
ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);
return (asize);
}
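/*
 * Editor's illustrative sketch, not part of vdev_draid.c: a worked example
 * of the rounding in vdev_draid_asize() above, using hypothetical values
 * (4K sectors, an 8 data + 1 parity group, and a 128K logical block).
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t ashift = 12;			/* 4K sectors */
	const uint64_t ndata = 8, groupwidth = 9;	/* 8d + 1p */
	const uint64_t psize = 128 * 1024;		/* 128K block */

	/* Same arithmetic as vdev_draid_asize(): round up to full rows. */
	uint64_t rows = ((psize - 1) / (ndata << ashift)) + 1;
	uint64_t asize = (rows * groupwidth) << ashift;

	/* 128K of data -> 4 rows -> 144K allocated (including parity). */
	printf("rows=%ju asize=%juK\n", (uintmax_t)rows,
	    (uintmax_t)(asize >> 10));
	return (0);
}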
/*
* Deflate the asize to the psize; this includes stripping parity.
*/
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT0(asize % vdc->vdc_groupwidth);
return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}
/*
* Convert a logical offset to the corresponding group number.
*/
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (offset / vdc->vdc_groupsz);
}
/*
* Convert a group number to the logical starting offset for that group.
*/
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (group * vdc->vdc_groupsz);
}
/*
* Full stripe writes. When writing, all columns (D+P) are required. Parity
* is calculated over all the columns, including empty zero filled sectors,
* and each is written to disk. While only the data columns are needed for
* a normal read, all of the columns are required for reconstruction when
* performing a sequential resilver.
*
* For "big columns" it's sufficient to map the correct range of the zio ABD.
* Partial columns require allocating a gang ABD in order to zero fill the
* empty sectors. When the column is empty a zero filled sector must be
* mapped. In all cases the data ABDs must be the same size as the parity
* ABDs (e.g. rc->rc_size == parity_size).
*/
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t abd_off = abd_offset;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small write), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
rc->rc_abd = abd_get_zeros(skip_size);
} else if (rc->rc_size == parity_size) {
/* this is a "big column" */
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
} else {
/* short data column, add a skip sector */
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd_get_offset_size(
zio->io_abd, abd_off, rc->rc_size), B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
B_TRUE);
}
ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);
abd_off += rc->rc_size;
rc->rc_size = parity_size;
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
/*
* Scrub/resilver reads. In order to store the contents of the skip sectors
* an additional ABD is allocated. The columns are handled in the same way
* as a full stripe write except instead of using the zero ABD the newly
* allocated skip ABD is used to back the skip sectors. In all cases the
* data ABD must be the same size as the parity ABDs.
*/
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t abd_off = abd_offset;
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, ==, NULL);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
B_FALSE);
}
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
} else if (rc->rc_size == parity_size) {
/* this is a "big column" */
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
} else {
/* short data column, add a skip sector */
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd_get_offset_size(
zio->io_abd, abd_off, rc->rc_size), B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_offset_size(
rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
skip_off += skip_size;
}
uint64_t abd_size = abd_get_size(rc->rc_abd);
ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
/*
* Increase rc_size so the skip ABD is included in subsequent
* parity calculations.
*/
abd_off += rc->rc_size;
rc->rc_size = abd_size;
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}
/*
* Normal reads. In this common case only the columns containing data
* are read into the zio ABDs. Neither the parity columns nor the empty
* skip sectors are read unless the checksum fails verification, in which
* case vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to
* expand the raid map in order to allow reconstruction using the parity
* data and skip sectors.
*/
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t abd_off = abd_offset;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size > 0) {
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
abd_off += rc->rc_size;
}
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
/*
* Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
* difference is that an ABD is allocated to back skip sectors so they may
* be read into memory, verified, and repaired if needed.
*/
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, ==, NULL);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
B_FALSE);
}
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
ASSERT3P(rc->rc_abd, ==, NULL);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
} else if (rc->rc_size == parity_size) {
/* this is a "big column", nothing to add */
ASSERT3P(rc->rc_abd, !=, NULL);
} else {
/*
* short data column, add a skip sector and clear
* rc_tried to force the entire column to be re-read
* thereby including the missing skip sector data
* which is needed for reconstruction.
*/
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
ASSERT3P(rc->rc_abd, !=, NULL);
ASSERT(!abd_is_gang(rc->rc_abd));
abd_t *read_abd = rc->rc_abd;
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_offset_size(
rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
skip_off += skip_size;
rc->rc_tried = 0;
}
/*
* Increase rc_size so the empty ABD is included in subsequent
* parity calculations.
*/
rc->rc_size = parity_size;
}
ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}
/*
* Verify that all empty sectors are zero filled before using them to
* calculate parity. Otherwise, silent corruption in an empty sector will
* result in bad parity being generated. That bad parity will then be
* considered authoritative and overwrite the good parity on disk. This
* is possible because the checksum is only calculated over the data,
* thus it cannot be used to detect damage in empty sectors.
*/
int
vdev_draid_map_verify_empty(zio_t *zio, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t skip_off = parity_size - skip_size;
uint64_t empty_off = 0;
int ret = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, !=, NULL);
ASSERT3U(rr->rr_bigcols, >, 0);
void *zero_buf = kmem_zalloc(skip_size, KM_SLEEP);
for (int c = rr->rr_bigcols; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
ASSERT3P(rc->rc_abd, !=, NULL);
ASSERT3U(rc->rc_size, ==, parity_size);
if (abd_cmp_buf_off(rc->rc_abd, zero_buf, skip_off,
skip_size) != 0) {
vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
abd_zero_off(rc->rc_abd, skip_off, skip_size);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
empty_off += skip_size;
}
ASSERT3U(empty_off, ==, abd_get_size(rr->rr_abd_empty));
kmem_free(zero_buf, skip_size);
return (ret);
}
/*
* Given a logical address within a dRAID configuration, return the physical
* address on the first drive in the group that this address maps to
* (at position 'start' in permutation number 'perm').
*/
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
uint64_t *perm, uint64_t *start)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
/* b is the dRAID (parent) sector offset. */
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t b_offset = logical_offset >> ashift;
/*
* The height of a row in units of the vdev's minimum sector size.
* This is the amount of data written to each disk of each group
* in a given permutation.
*/
uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;
/*
* We cycle through a disk permutation every groupsz * ngroups chunk
* of address space. Note that ngroups * groupsz must be a multiple
* of the number of data drives (ndisks) in order to guarantee
* alignment. So, for example, if our row height is 16MB, our group
* size is 10, and there are 13 data drives in the draid, then ngroups
* will be 13, we will change permutation every 2.08GB and each
* disk will have 160MB of data per chunk.
*/
uint64_t groupwidth = vdc->vdc_groupwidth;
uint64_t ngroups = vdc->vdc_ngroups;
uint64_t ndisks = vdc->vdc_ndisks;
/*
* groupstart is where the group this IO will land in "starts" in
* the permutation array.
*/
uint64_t group = logical_offset / vdc->vdc_groupsz;
uint64_t groupstart = (group * groupwidth) % ndisks;
ASSERT3U(groupstart + groupwidth, <=, ndisks + groupstart);
*start = groupstart;
/* b_offset is the sector offset within a group chunk */
b_offset = b_offset % (rowheight_sectors * groupwidth);
ASSERT0(b_offset % groupwidth);
/*
* Find the starting byte offset on each child vdev:
* - within a permutation there are ngroups groups spread over the
* rows, where each row covers a slice portion of the disk
* - each permutation has (groupwidth * ngroups) / ndisks rows
* - so each permutation covers rows * slice portion of the disk
* - so we need to find the row where this IO group target begins
*/
*perm = group / ngroups;
uint64_t row = (*perm * ((groupwidth * ngroups) / ndisks)) +
(((group % ngroups) * groupwidth) / ndisks);
return (((rowheight_sectors * row) +
(b_offset / groupwidth)) << ashift);
}
static uint64_t
vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
uint64_t abd_offset, uint64_t abd_size)
{
vdev_t *vd = zio->io_vd;
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t io_size = abd_size;
uint64_t io_asize = vdev_draid_asize(vd, io_size);
uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);
/*
* Limit the io_size to the space remaining in the group. A second
* row in the raidz_map_t is created for the remainder.
*/
if (io_offset + io_asize > start_offset) {
io_size = vdev_draid_asize_to_psize(vd,
start_offset - io_offset);
}
/*
* At most a block may span the logical end of one group and the start
* of the next group. Therefore, at the end of a group the io_size must
* span the group width evenly and the remainder must be aligned to the
* start of the next group.
*/
IMPLY(abd_offset == 0 && io_size < zio->io_size,
(io_asize >> ashift) % vdc->vdc_groupwidth == 0);
IMPLY(abd_offset != 0,
vdev_draid_group_to_offset(vd, group) == io_offset);
/* Lookup starting byte offset on each child vdev */
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
io_offset, &perm, &groupstart);
/*
* If there are fewer than groupwidth drives available after the group
* start, the group is going to wrap onto the next row. 'wrap' is the
* group disk number that starts on the next row.
*/
uint64_t ndisks = vdc->vdc_ndisks;
uint64_t groupwidth = vdc->vdc_groupwidth;
uint64_t wrap = groupwidth;
if (groupstart + groupwidth > ndisks)
wrap = ndisks - groupstart;
/* The io size in units of the vdev's minimum sector size. */
const uint64_t psize = io_size >> ashift;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
uint64_t q = psize / vdc->vdc_ndata;
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
uint64_t r = psize - q * vdc->vdc_ndata;
/* The number of "big columns" - those which contain remainder data. */
uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
ASSERT3U(bc, <, groupwidth);
/* The total number of data and parity sectors for this I/O. */
uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
raidz_row_t *rr;
rr = kmem_alloc(offsetof(raidz_row_t, rr_col[groupwidth]), KM_SLEEP);
rr->rr_cols = groupwidth;
rr->rr_scols = groupwidth;
rr->rr_bigcols = bc;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
rr->rr_firstdatacol = vdc->vdc_nparity;
rr->rr_abd_empty = NULL;
#ifdef ZFS_DEBUG
rr->rr_offset = io_offset;
rr->rr_size = io_size;
#endif
*rrp = rr;
uint8_t *base;
uint64_t iter, asize = 0;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < groupwidth; i++) {
raidz_col_t *rc = &rr->rr_col[i];
uint64_t c = (groupstart + i) % ndisks;
/* increment the offset if we wrap to the next row */
if (i == wrap)
physical_offset += VDEV_DRAID_ROWHEIGHT;
rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
rc->rc_offset = physical_offset;
rc->rc_abd = NULL;
rc->rc_orig_data = NULL;
rc->rc_error = 0;
rc->rc_tried = 0;
rc->rc_skipped = 0;
rc->rc_force_repair = 0;
rc->rc_allow_repair = 1;
rc->rc_need_orig_restore = B_FALSE;
if (q == 0 && i >= bc)
rc->rc_size = 0;
else if (i < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rr->rr_nempty = roundup(tot, groupwidth) - tot;
IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);
/* Allocate buffers for the parity columns */
for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
/*
* Map buffers for data columns and allocate/map buffers for skip
* sectors. There are three distinct cases for dRAID which are
* required to support sequential rebuild.
*/
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_draid_map_alloc_write(zio, abd_offset, rr);
} else if ((rr->rr_nempty > 0) &&
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
} else {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
vdev_draid_map_alloc_read(zio, abd_offset, rr);
}
return (io_size);
}
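/*
 * Editor's illustrative sketch, not part of vdev_draid.c: the quotient/
 * remainder/big-column accounting performed by vdev_draid_map_alloc_row()
 * above, for a hypothetical 8 data + 1 parity group and a 20-sector write.
 */
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t ndata = 8, nparity = 1, groupwidth = 9;
	const uint64_t psize = 20;	/* data sectors in this I/O */

	uint64_t q = psize / ndata;			/* 2 full stripes */
	uint64_t r = psize - q * ndata;			/* 4 extra sectors */
	uint64_t bc = (r == 0 ? 0 : r + nparity);	/* 5 "big columns" */
	uint64_t tot = psize + nparity * (q + (r == 0 ? 0 : 1));
	uint64_t nempty = ((tot + groupwidth - 1) / groupwidth) *
	    groupwidth - tot;				/* 4 skip sectors */

	assert(bc < groupwidth);
	printf("q=%ju r=%ju bc=%ju tot=%ju nempty=%ju\n", (uintmax_t)q,
	    (uintmax_t)r, (uintmax_t)bc, (uintmax_t)tot, (uintmax_t)nempty);
	return (0);
}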
/*
* Allocate the raidz mapping to be applied to the dRAID I/O. The parity
* calculations for dRAID are identical to raidz however there are a few
* differences in the layout.
*
* - dRAID always allocates a full stripe width. Any extra sectors due to
* this padding are zero filled and written to disk. They will be read
* back during a scrub or repair operation since they are included in
* the parity calculation. This property enables sequential resilvering.
*
* - When the block at the logical offset spans redundancy groups then two
* rows are allocated in the raidz_map_t. One row resides at the end of
* the first group and the other at the start of the following group.
*/
static raidz_map_t *
vdev_draid_map_alloc(zio_t *zio)
{
raidz_row_t *rr[2];
uint64_t abd_offset = 0;
uint64_t abd_size = zio->io_size;
uint64_t io_offset = zio->io_offset;
uint64_t size;
int nrows = 1;
size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
abd_offset, abd_size);
if (size < abd_size) {
vdev_t *vd = zio->io_vd;
io_offset += vdev_draid_asize(vd, size);
abd_offset += size;
abd_size -= size;
nrows++;
ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
vd, vdev_draid_offset_to_group(vd, io_offset)));
ASSERT3U(abd_offset, <, zio->io_size);
ASSERT3U(abd_size, !=, 0);
size = vdev_draid_map_alloc_row(zio, &rr[1],
io_offset, abd_offset, abd_size);
VERIFY3U(size, ==, abd_size);
}
raidz_map_t *rm;
rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
rm->rm_ops = vdev_raidz_math_get_ops();
rm->rm_nrows = nrows;
rm->rm_row[0] = rr[0];
if (nrows == 2)
rm->rm_row[1] = rr[1];
return (rm);
}
/*
* Given an offset into a dRAID return the next group width aligned offset
* which can be used to start an allocation.
*/
static uint64_t
vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
}
/*
* Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
* rounded down to the last full slice. So each child must provide at least
* 1 / (children - nspares) of its asize.
*/
static uint64_t
vdev_draid_min_asize(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (VDEV_DRAID_REFLOW_RESERVE +
(vd->vdev_min_asize + vdc->vdc_ndisks - 1) / (vdc->vdc_ndisks));
}
/*
* When using dRAID the minimum allocation size is determined by the number
* of data disks in the redundancy group. Full stripes are always used.
*/
static uint64_t
vdev_draid_min_alloc(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (vdc->vdc_ndata << vd->vdev_ashift);
}
/*
* Returns true if the txg range does not exist on any leaf vdev.
*
* A dRAID spare does not fit into the DTL model. While it has child vdevs
* there is no redundancy among them, and the effective child vdev is
* determined by offset. Essentially we do a vdev_dtl_reassess() on the
* fly by replacing a dRAID spare with the child vdev under the offset.
* Note that it is a recursive process because the child vdev can be
* another dRAID spare and so on.
*/
boolean_t
vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
uint64_t size)
{
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
/*
* Check all of the readable children; if any child
* contains the txg range then the data is not missing.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (!vdev_draid_missing(cvd, physical_offset,
txg, size))
return (B_FALSE);
}
return (B_TRUE);
}
if (vd->vdev_ops == &vdev_draid_spare_ops) {
/*
* When sequentially resilvering we don't have a proper
* txg range so instead we must presume all txgs are
* missing on this vdev until the resilver completes.
*/
if (vd->vdev_rebuild_txg != 0)
return (B_TRUE);
/*
* DTL_MISSING is set for all prior txgs when a resilver
* is started in spa_vdev_attach().
*/
if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
return (B_TRUE);
/*
* Consult the DTL on the relevant vdev. Either a leaf
* vdev or a spare/replacing mirror child may be returned,
* so we must recursively call vdev_draid_missing().
*/
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_TRUE);
return (vdev_draid_missing(vd, physical_offset,
txg, size));
}
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Returns true if the txg is only partially replicated on the leaf vdevs.
*/
static boolean_t
vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
uint64_t size)
{
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
/*
* Check all of the readable children; if any child is
* missing the txg range then it is partially replicated.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (vdev_draid_partial(cvd, physical_offset, txg, size))
return (B_TRUE);
}
return (B_FALSE);
}
if (vd->vdev_ops == &vdev_draid_spare_ops) {
/*
* When sequentially resilvering we don't have a proper
* txg range so instead we must presume all txgs are
* missing on this vdev until the resilver completes.
*/
if (vd->vdev_rebuild_txg != 0)
return (B_TRUE);
/*
* DTL_MISSING is set for all prior txgs when a resilver
* is started in spa_vdev_attach().
*/
if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
return (B_TRUE);
/*
* Consult the DTL on the relevant vdev. Either a leaf
* vdev or a spare/replacing mirror child may be returned,
* so we must recursively call vdev_draid_partial().
*/
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_TRUE);
return (vdev_draid_partial(vd, physical_offset, txg, size));
}
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Determine if the vdev is readable at the given offset.
*/
boolean_t
vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
{
if (vd->vdev_ops == &vdev_draid_spare_ops) {
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_FALSE);
}
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (vdev_draid_readable(cvd, physical_offset))
return (B_TRUE);
}
return (B_FALSE);
}
return (vdev_readable(vd));
}
/*
* Returns the first distributed spare found under the provided vdev tree.
*/
static vdev_t *
vdev_draid_find_spare(vdev_t *vd)
{
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
if (svd != NULL)
return (svd);
}
return (NULL);
}
/*
* Returns B_TRUE if the passed in vdev is currently "faulted".
* Faulted, in this context, means that the vdev represents a
* replacing or sparing vdev tree.
*/
static boolean_t
vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
{
if (vd->vdev_ops == &vdev_draid_spare_ops) {
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_FALSE);
/*
* After resolving the distributed spare to a leaf vdev
* check the parent to determine if it's "faulted".
*/
vd = vd->vdev_parent;
}
return (vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
}
/*
* Determine if the dRAID block at the logical offset is degraded.
* Used by sequential resilver.
*/
static boolean_t
vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
offset, &perm, &groupstart);
uint8_t *base;
uint64_t iter;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
vdev_t *cvd = vd->vdev_child[cid];
/* Group contains a faulted vdev. */
if (vdev_draid_faulted(cvd, physical_offset))
return (B_TRUE);
/*
* Always check groups with active distributed spares
* because any vdev failure in the pool will affect them.
*/
if (vdev_draid_find_spare(cvd) != NULL)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Determine if the txg is missing. Used by healing resilver.
*/
static boolean_t
vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
uint64_t size)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
offset, &perm, &groupstart);
uint8_t *base;
uint64_t iter;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
vdev_t *cvd = vd->vdev_child[cid];
/* Transaction group is known to be partially replicated. */
if (vdev_draid_partial(cvd, physical_offset, txg, size))
return (B_TRUE);
/*
* Always check groups with active distributed spares
* because any vdev failure in the pool will affect them.
*/
if (vdev_draid_find_spare(cvd) != NULL)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Find the smallest child asize and largest sector size to calculate the
* available capacity. Distributed spares are ignored since their capacity
* is also based on the minimum child size in the top-level dRAID.
*/
static void
vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
{
uint64_t logical_ashift = 0, physical_ashift = 0;
uint64_t asize = 0, max_asize = 0;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
continue;
asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
- physical_ashift = MAX(physical_ashift,
- cvd->vdev_physical_ashift);
+ }
+ for (int c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+
+ if (cvd->vdev_ops == &vdev_draid_spare_ops)
+ continue;
+ physical_ashift = vdev_best_ashift(logical_ashift,
+ physical_ashift, cvd->vdev_physical_ashift);
}
*asizep = asize;
*max_asizep = max_asize;
*logical_ashiftp = logical_ashift;
*physical_ashiftp = physical_ashift;
}
/*
* Open spare vdevs.
*/
static boolean_t
vdev_draid_open_spares(vdev_t *vd)
{
return (vd->vdev_ops == &vdev_draid_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
}
/*
* Open all children, excluding spares.
*/
static boolean_t
vdev_draid_open_children(vdev_t *vd)
{
return (!vdev_draid_open_spares(vd));
}
/*
* Open a top-level dRAID vdev.
*/
static int
vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t nparity = vdc->vdc_nparity;
int open_errors = 0;
if (nparity > VDEV_DRAID_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* First open the normal children then the distributed spares. This
* ordering is important to ensure the distributed spares calculate
* the correct psize in the event that the dRAID vdevs were expanded.
*/
vdev_open_children_subset(vd, vdev_draid_open_children);
vdev_open_children_subset(vd, vdev_draid_open_spares);
/* Verify enough of the children are available to continue. */
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_open_error != 0) {
if ((++open_errors) > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (SET_ERROR(ENXIO));
}
}
}
/*
* Allocatable capacity is the sum of the space on all children less
* the number of distributed spares, rounded down to the last full row
* and then to the last full group. An additional 32MB of scratch
* space is reserved at the end of each child for use by the dRAID
* expansion feature.
*/
uint64_t child_asize, child_max_asize;
vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
logical_ashift, physical_ashift);
/*
* Should be unreachable since the minimum child size is 64MB, but
* we want to make sure an underflow absolutely cannot occur here.
*/
if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
return (SET_ERROR(ENXIO));
}
child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
vdc->vdc_groupsz);
*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
vdc->vdc_groupsz);
return (0);
}
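The capacity rounding performed above can be followed in a short standalone sketch. The 32MB reserve matches the comment in vdev_draid_open(); the 16MB row height, the child size, and the draid2:8d:11c:1s layout are illustrative assumptions only:

#include <stdint.h>
#include <stdio.h>

#define ROWHEIGHT	(16ULL << 20)	/* assumed VDEV_DRAID_ROWHEIGHT */
#define RESERVE		(32ULL << 20)	/* 32MB scratch, per the comment */

int
main(void)
{
	/* Hypothetical draid2:8d:11c:1s layout. */
	uint64_t groupwidth = 10, ndisks = 10;
	uint64_t groupsz = groupwidth * ROWHEIGHT;
	uint64_t child_asize = 4000000000000ULL;	/* smallest child */

	/* Reserve the expansion scratch space, round down to full rows. */
	child_asize = ((child_asize - RESERVE) / ROWHEIGHT) * ROWHEIGHT;

	/* Scale by the data-bearing disks, round down to whole groups. */
	uint64_t asize = ((child_asize * ndisks) / groupsz) * groupsz;

	printf("dRAID asize: %llu bytes\n", (unsigned long long)asize);
	return (0);
}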
/*
* Close a top-level dRAID vdev.
*/
static void
vdev_draid_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
/*
* Return the maximum asize for a rebuild zio in the provided range
* given the following constraints. A dRAID chunk may not:
*
* - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
* - Span dRAID redundancy groups.
*/
static uint64_t
vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t max_segment)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t ashift = vd->vdev_ashift;
uint64_t ndata = vdc->vdc_ndata;
uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
SPA_MAXBLOCKSIZE);
ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);
/* Chunks must evenly span all data columns in the group. */
psize = (((psize >> ashift) / ndata) * ndata) << ashift;
uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));
/* Reduce the chunk size to the group space remaining. */
uint64_t group = vdev_draid_offset_to_group(vd, start);
uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
chunk_size = MIN(chunk_size, left);
ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
vdev_draid_offset_to_group(vd, start + chunk_size - 1));
return (chunk_size);
}
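The psize arithmetic above, which rounds the rebuild segment to a whole number of data columns, can be reproduced in isolation. The ashift, data-column count, and segment size below are hypothetical, and the 16MB block-size cap is only an assumed value for SPA_MAXBLOCKSIZE:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t ashift = 12;			/* 4KiB sectors, hypothetical */
	uint64_t ndata = 8;			/* data columns, hypothetical */
	uint64_t max_segment = 1ULL << 20;	/* 1MiB rebuild segment */
	uint64_t maxblock = 16ULL << 20;	/* assumed SPA_MAXBLOCKSIZE */

	/* Round max_segment * ndata up to a sector, clamp to the largest
	 * block size, then trim to a whole number of data columns. */
	uint64_t psize = max_segment * ndata;
	psize = ((psize + (1ULL << ashift) - 1) >> ashift) << ashift;
	psize = MIN(psize, maxblock);
	psize = (((psize >> ashift) / ndata) * ndata) << ashift;

	printf("rebuild psize: %llu bytes\n", (unsigned long long)psize);
	return (0);
}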
/*
* Align the start of the metaslab to the group width and slightly reduce
* its size to a multiple of the group width. Since full stripe writes are
* required by dRAID this space is unallocable. Furthermore, aligning the
* metaslab start is important for vdev initialize and TRIM which both operate
* on metaslab boundaries which vdev_xlate() expects to be aligned.
*/
static void
vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;
*ms_start = astart;
*ms_size = asize;
ASSERT0(*ms_start % sz);
ASSERT0(*ms_size % sz);
}
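A rough standalone illustration of the metaslab trimming above. The astart computation here is a simplified stand-in that only rounds up to the next full stripe; the real vdev_draid_get_astart() also accounts for redundancy group boundaries, and all values are hypothetical:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t ashift = 12, groupwidth = 5;	/* hypothetical */
	uint64_t sz = groupwidth << ashift;	/* full stripe: 20KiB */
	uint64_t ms_start = 1000000, ms_size = 10000000;

	/* Simplified stand-in for vdev_draid_get_astart(). */
	uint64_t astart = ((ms_start + sz - 1) / sz) * sz;
	uint64_t asize = ((ms_size - (astart - ms_start)) / sz) * sz;

	printf("start %llu -> %llu, size %llu -> %llu\n",
	    (unsigned long long)ms_start, (unsigned long long)astart,
	    (unsigned long long)ms_size, (unsigned long long)asize);
	return (0);
}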
/*
* Add virtual dRAID spares to the list of valid spares. In order to accomplish
* this the existing array must be freed and reallocated with the additional
* entries.
*/
int
vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
uint64_t next_vdev_id)
{
uint64_t draid_nspares = 0;
uint64_t ndraid = 0;
int error;
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_t *cvd = vd->vdev_child[i];
if (cvd->vdev_ops == &vdev_draid_ops) {
vdev_draid_config_t *vdc = cvd->vdev_tsd;
draid_nspares += vdc->vdc_nspares;
ndraid++;
}
}
if (draid_nspares == 0) {
*ndraidp = ndraid;
return (0);
}
nvlist_t **old_spares, **new_spares;
uint_t old_nspares;
error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&old_spares, &old_nspares);
if (error)
old_nspares = 0;
/* Allocate memory and copy the existing spares. */
new_spares = kmem_alloc(sizeof (nvlist_t *) *
(draid_nspares + old_nspares), KM_SLEEP);
for (uint_t i = 0; i < old_nspares; i++)
new_spares[i] = fnvlist_dup(old_spares[i]);
/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
uint64_t n = old_nspares;
for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
vdev_t *cvd = vd->vdev_child[vdev_id];
char path[64];
if (cvd->vdev_ops != &vdev_draid_ops)
continue;
vdev_draid_config_t *vdc = cvd->vdev_tsd;
uint64_t nspares = vdc->vdc_nspares;
uint64_t nparity = vdc->vdc_nparity;
for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
memset(path, 0, sizeof (path));
(void) snprintf(path, sizeof (path) - 1,
"%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
(u_longlong_t)nparity,
(u_longlong_t)next_vdev_id + vdev_id,
(u_longlong_t)spare_id);
nvlist_t *spare = fnvlist_alloc();
fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_DRAID_SPARE);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
cvd->vdev_guid);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
spare_id);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
cvd->vdev_ashift);
new_spares[n] = spare;
n++;
}
}
if (n > 0) {
(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t **)new_spares, n);
}
for (int i = 0; i < n; i++)
nvlist_free(new_spares[i]);
kmem_free(new_spares, sizeof (*new_spares) * n);
*ndraidp = ndraid;
return (0);
}
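The distributed spare paths built by the snprintf() above follow a "<type><parity>-<top vdev id>-<spare id>" pattern. A self-contained sketch of that formatting, assuming "draid" is the VDEV_TYPE_DRAID string and using hypothetical IDs:

#include <stdio.h>

int
main(void)
{
	const char *draid_type = "draid";	/* assumed VDEV_TYPE_DRAID */
	unsigned long long nparity = 2, top_vdev_id = 0, spare_id = 1;
	char path[64];

	/* Same format used for ZPOOL_CONFIG_PATH in
	 * vdev_draid_spare_create(). */
	(void) snprintf(path, sizeof (path) - 1, "%s%llu-%llu-%llu",
	    draid_type, nparity, top_vdev_id, spare_id);

	printf("%s\n", path);	/* prints "draid2-0-1" */
	return (0);
}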
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered.
*/
static boolean_t
vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = vdev_draid_asize(vd, psize);
if (phys_birth == TXG_UNKNOWN) {
/*
* Sequential resilver. There is no meaningful phys_birth
* for this block; we can only determine whether the block resides
* in a degraded group, in which case it must be resilvered.
*/
ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
vdev_draid_offset_to_group(vd, offset + asize - 1));
return (vdev_draid_group_degraded(vd, offset));
} else {
/*
* Healing resilver. TXGs not in DTL_PARTIAL are intact,
* as are blocks in non-degraded groups.
*/
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
return (B_TRUE);
/* The block may span groups in which case check both. */
if (vdev_draid_offset_to_group(vd, offset) !=
vdev_draid_offset_to_group(vd, offset + asize - 1)) {
if (vdev_draid_group_missing(vd,
offset + asize, phys_birth, 1))
return (B_TRUE);
}
return (B_FALSE);
}
}
static boolean_t
vdev_draid_rebuilding(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (vdev_draid_rebuilding(vd->vdev_child[i])) {
return (B_TRUE);
}
}
return (B_FALSE);
}
static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_draid_asize(vd, rr->rr_size);
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
#endif
}
/*
* For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity. A gang ABD is allocated by vdev_draid_map_alloc()
* if a skip sector needs to be added to a column.
*/
static void
vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* Empty columns are zero filled and included in the parity
* calculation and therefore must be written.
*/
ASSERT3U(rc->rc_size, !=, 0);
/* Verify physical to logical translation */
vdev_draid_io_verify(vd, rr, c);
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx], rc->rc_offset,
rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
0, vdev_raidz_child_done, rc));
}
}
/*
* For read operations:
* 1. The vdev_draid_map_alloc() function will create a minimal raidz
* mapping for the read based on the zio->io_flags. There are two
* possible mappings either 1) a normal read, or 2) a scrub/resilver.
* 2. Create the zio read operations. This will include all parity
* columns and skip sectors for a scrub/resilver.
*/
static void
vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
/* Sequential rebuild must do IO at redundancy group boundary. */
IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);
/*
* Iterate over the columns in reverse order so that we hit the parity
* last. Any errors along the way will force us to read the parity.
* For scrub/resilver IOs which verify skip sectors, a gang ABD will
* have been allocated to store them and rc->rc_size is increased.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_draid_readable(cvd, rc->rc_offset)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1;
rc->rc_skipped = 1;
continue;
}
if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
/*
* Empty columns may be read during vdev_draid_io_done().
* Only skip them after the readable and missing checks
* verify they are available.
*/
if (rc->rc_size == 0) {
rc->rc_skipped = 1;
continue;
}
if (zio->io_flags & ZIO_FLAG_RESILVER) {
vdev_t *svd;
/*
* Sequential rebuilds need to always consider the data
* on the child being rebuilt to be stale. This is
* important when all columns are available to aid
* known reconstruction in identifying which columns
* contain incorrect data.
*
* Furthermore, all repairs need to be constrained to
* the devices being rebuilt because without a checksum
* we cannot verify the data is actually correct and
* performing an incorrect repair could result in
* locking in damage and making the data unrecoverable.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
if (vdev_draid_rebuilding(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
rc->rc_allow_repair = 1;
continue;
} else {
rc->rc_allow_repair = 0;
}
} else {
rc->rc_allow_repair = 1;
}
/*
* If this child is a distributed spare then the
* offset might reside on the vdev being replaced.
* In which case this data must be written to the
* new device. Failure to do so would result in
* checksum errors when the old device is detached
* and the pool is scrubbed.
*/
if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
svd = vdev_draid_spare_get_child(svd,
rc->rc_offset);
if (svd && (svd->vdev_ops == &vdev_spare_ops ||
svd->vdev_ops == &vdev_replacing_ops)) {
rc->rc_force_repair = 1;
if (vdev_draid_rebuilding(svd))
rc->rc_allow_repair = 1;
}
}
/*
* Always issue a repair IO to this child when it's
* a spare or replacing vdev with an active rebuild.
*/
if ((cvd->vdev_ops == &vdev_spare_ops ||
cvd->vdev_ops == &vdev_replacing_ops) &&
vdev_draid_rebuilding(cvd)) {
rc->rc_force_repair = 1;
rc->rc_allow_repair = 1;
}
}
}
/*
* Either a parity or data column is missing; this means a repair
* may be attempted by vdev_draid_io_done(). Expand the raid map
* to read in empty columns which are needed along with the parity
* during reconstruction.
*/
if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
vdev_draid_map_alloc_empty(zio, rr);
}
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_error || rc->rc_size == 0)
continue;
if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
/*
* Start an IO operation to a dRAID vdev.
*/
static void
vdev_draid_io_start(zio_t *zio)
{
vdev_t *vd __maybe_unused = zio->io_vd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));
raidz_map_t *rm = vdev_draid_map_alloc(zio);
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_draid_io_start_write(zio, rm->rm_row[i]);
}
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_draid_io_start_read(zio, rm->rm_row[i]);
}
}
zio_execute(zio);
}
/*
* Complete an IO operation on a dRAID vdev. The raidz logic can be applied
* to dRAID since the layout is fully described by the raidz_map_t.
*/
static void
vdev_draid_io_done(zio_t *zio)
{
vdev_raidz_io_done(zio);
}
static void
vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT(vd->vdev_ops == &vdev_draid_ops);
if (faulted > vdc->vdc_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
static void
vdev_draid_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_draid_ops);
vdev_draid_config_t *vdc = raidvd->vdev_tsd;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* Make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t logical_start = logical_rs->rs_start;
uint64_t logical_end = logical_rs->rs_end;
/*
* Unaligned ranges must be skipped. All metaslabs are correctly
* aligned so this should not happen, but this case is handled in
* case it's needed by future callers.
*/
uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
if (astart != logical_start) {
physical_rs->rs_start = logical_start;
physical_rs->rs_end = logical_start;
remain_rs->rs_start = MIN(astart, logical_end);
remain_rs->rs_end = logical_end;
return;
}
/*
* Unlike with mirrors and raidz a dRAID logical range can map
* to multiple non-contiguous physical ranges. This is handled by
* limiting the size of the logical range to a single group and
* setting the remain argument such that it describes the remaining
* unmapped logical range. This is stricter than absolutely
* necessary but helps simplify the logic below.
*/
uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
if (logical_end > nextstart)
logical_end = nextstart;
/* Find the starting offset for each vdev in the group */
uint64_t perm, groupstart;
uint64_t start = vdev_draid_logical_to_physical(raidvd,
logical_start, &perm, &groupstart);
uint64_t end = start;
uint8_t *base;
uint64_t iter, id;
vdev_draid_get_perm(vdc, perm, &base, &iter);
/*
* Check if the passed child falls within the group. If it does,
* update the start and end to reflect the physical range.
* Otherwise, leave them unmodified which will result in an empty
* (zero-length) physical range being returned.
*/
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
if (c == 0 && i != 0) {
/* the group wrapped, increment the start */
start += VDEV_DRAID_ROWHEIGHT;
end = start;
}
id = vdev_draid_permute_id(vdc, base, iter, c);
if (id == cvd->vdev_id) {
uint64_t b_size = (logical_end >> ashift) -
(logical_start >> ashift);
ASSERT3U(b_size, >, 0);
end = start + ((((b_size - 1) /
vdc->vdc_groupwidth) + 1) << ashift);
break;
}
}
physical_rs->rs_start = start;
physical_rs->rs_end = end;
/*
* Only top-level vdevs are allowed to set remain_rs because
* when .vdev_op_xlate() is called for their children the full
* logical range is not provided by vdev_xlate().
*/
remain_rs->rs_start = logical_end;
remain_rs->rs_end = logical_rs->rs_end;
ASSERT3U(physical_rs->rs_start, <=, logical_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_end - logical_start);
}
/*
* Add dRAID specific fields to the config nvlist.
*/
static void
vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
vdev_draid_config_t *vdc = vd->vdev_tsd;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
}
/*
* Initialize private dRAID specific fields from the nvlist.
*/
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
(void) spa;
uint64_t ndata, nparity, nspares, ngroups;
int error;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
return (SET_ERROR(EINVAL));
}
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children == 0 ||
children > VDEV_DRAID_MAX_CHILDREN) {
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
nspares > 100 || nspares > (children - (ndata + nparity))) {
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
return (SET_ERROR(EINVAL));
}
/*
* Validate that the minimum number of children exists per group for the
* specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
*/
if (children < (ndata + nparity + nspares))
return (SET_ERROR(EINVAL));
/*
* Create the dRAID configuration using the pool nvlist configuration
* and the fixed mapping for the correct number of children.
*/
vdev_draid_config_t *vdc;
const draid_map_t *map;
error = vdev_draid_lookup_map(children, &map);
if (error)
return (SET_ERROR(EINVAL));
vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
vdc->vdc_ndata = ndata;
vdc->vdc_nparity = nparity;
vdc->vdc_nspares = nspares;
vdc->vdc_children = children;
vdc->vdc_ngroups = ngroups;
vdc->vdc_nperms = map->dm_nperms;
error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
if (error) {
kmem_free(vdc, sizeof (*vdc));
return (SET_ERROR(EINVAL));
}
/*
* Derived constants.
*/
vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
vdc->vdc_ndisks;
ASSERT3U(vdc->vdc_groupwidth, >=, 2);
ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
vdc->vdc_ndisks, ==, 0);
*tsd = vdc;
return (0);
}
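The derived constants at the end of vdev_draid_init() can be computed by hand for a small layout. The sketch below uses a hypothetical draid1:4d:7c:1s configuration, assumes a 16MB row height for VDEV_DRAID_ROWHEIGHT, and picks ngroups = 6 so that groupwidth * ngroups divides evenly by ndisks, as the assertions above require:

#include <stdint.h>
#include <stdio.h>

#define ROWHEIGHT (16ULL << 20)	/* assumed VDEV_DRAID_ROWHEIGHT */

int
main(void)
{
	/* Hypothetical draid1:4d:7c:1s layout. */
	uint64_t ndata = 4, nparity = 1, nspares = 1;
	uint64_t children = 7, ngroups = 6;

	/* Derived the same way as at the end of vdev_draid_init(). */
	uint64_t groupwidth = ndata + nparity;		/* 5 */
	uint64_t ndisks = children - nspares;		/* 6 */
	uint64_t groupsz = groupwidth * ROWHEIGHT;
	uint64_t devslicesz = (groupsz * ngroups) / ndisks;

	printf("groupwidth=%llu ndisks=%llu groupsz=%llu devslicesz=%llu\n",
	    (unsigned long long)groupwidth, (unsigned long long)ndisks,
	    (unsigned long long)groupsz, (unsigned long long)devslicesz);
	return (0);
}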
static void
vdev_draid_fini(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
vdc->vdc_children * vdc->vdc_nperms);
kmem_free(vdc, sizeof (*vdc));
}
static uint64_t
vdev_draid_nparity(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
return (vdc->vdc_nparity);
}
static uint64_t
vdev_draid_ndisks(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
return (vdc->vdc_ndisks);
}
vdev_ops_t vdev_draid_ops = {
.vdev_op_init = vdev_draid_init,
.vdev_op_fini = vdev_draid_fini,
.vdev_op_open = vdev_draid_open,
.vdev_op_close = vdev_draid_close,
.vdev_op_asize = vdev_draid_asize,
.vdev_op_min_asize = vdev_draid_min_asize,
.vdev_op_min_alloc = vdev_draid_min_alloc,
.vdev_op_io_start = vdev_draid_io_start,
.vdev_op_io_done = vdev_draid_io_done,
.vdev_op_state_change = vdev_draid_state_change,
.vdev_op_need_resilver = vdev_draid_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_draid_xlate,
.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
.vdev_op_metaslab_init = vdev_draid_metaslab_init,
.vdev_op_config_generate = vdev_draid_config_generate,
.vdev_op_nparity = vdev_draid_nparity,
.vdev_op_ndisks = vdev_draid_ndisks,
.vdev_op_type = VDEV_TYPE_DRAID,
.vdev_op_leaf = B_FALSE,
};
/*
* A dRAID distributed spare is a virtual leaf vdev which is included in the
* parent dRAID configuration. The last N columns of the dRAID permutation
* table are used to determine on which dRAID children a specific offset
* should be written. These spare leaf vdevs can only be used to replace
* faulted children in the same dRAID configuration.
*/
/*
* Distributed spare state. All fields are set when the distributed spare is
* first opened and are immutable.
*/
typedef struct {
vdev_t *vds_draid_vdev; /* top-level parent dRAID vdev */
uint64_t vds_top_guid; /* top-level parent dRAID guid */
uint64_t vds_spare_id; /* spare id (0 - vdc->vdc_nspares-1) */
} vdev_draid_spare_t;
/*
* Returns the parent dRAID vdev to which the distributed spare belongs.
* This may be safely called even when the vdev is not open.
*/
vdev_t *
vdev_draid_spare_get_parent(vdev_t *vd)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
if (vds->vds_draid_vdev != NULL)
return (vds->vds_draid_vdev);
return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
vds->vds_top_guid));
}
/*
* A dRAID spare is active when it's the child of a vdev using the
* vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
*/
static boolean_t
vdev_draid_spare_is_active(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_draid_ops)) {
return (B_TRUE);
} else {
return (B_FALSE);
}
}
/*
* Given a dRAID distributed spare vdev, returns the physical child vdev
* on which the provided offset resides. This may involve recursing through
* multiple layers of distributed spares. Note that offset is relative to
* this vdev.
*/
vdev_t *
vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
/* The vdev is closed */
if (vds->vds_draid_vdev == NULL)
return (NULL);
vdev_t *tvd = vds->vds_draid_vdev;
vdev_draid_config_t *vdc = tvd->vdev_tsd;
ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);
uint8_t *base;
uint64_t iter;
uint64_t perm = physical_offset / vdc->vdc_devslicesz;
vdev_draid_get_perm(vdc, perm, &base, &iter);
uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
(tvd->vdev_children - 1) - vds->vds_spare_id);
vdev_t *cvd = tvd->vdev_child[cid];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return (vdev_draid_spare_get_child(cvd, physical_offset));
return (cvd);
}
static void
vdev_draid_spare_close(vdev_t *vd)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
vds->vds_draid_vdev = NULL;
}
/*
* Opening a dRAID spare device is done by looking up the associated dRAID
* top-level vdev guid from the spare configuration.
*/
static int
vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
uint64_t asize, max_asize;
vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
if (tvd == NULL) {
/*
* When spa_vdev_add() is labeling new spares the
* associated dRAID is not attached to the root vdev
* nor does this spare have a parent. Simulate a valid
* device in order to allow the label to be initialized
* and the distributed spare added to the configuration.
*/
if (vd->vdev_parent == NULL) {
*psize = *max_psize = SPA_MINDEVSIZE;
*logical_ashift = *physical_ashift = ASHIFT_MIN;
return (0);
}
return (SET_ERROR(EINVAL));
}
vdev_draid_config_t *vdc = tvd->vdev_tsd;
if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
return (SET_ERROR(EINVAL));
if (vds->vds_spare_id >= vdc->vdc_nspares)
return (SET_ERROR(EINVAL));
/*
* Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
* because the caller may be vdev_draid_open() in which case the
* values are stale as they haven't yet been updated by vdev_open().
* To avoid this, always recalculate the dRAID asize and max_asize.
*/
vdev_draid_calculate_asize(tvd, &asize, &max_asize,
logical_ashift, physical_ashift);
*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
vds->vds_draid_vdev = tvd;
return (0);
}
/*
* Completed distributed spare IO. Store the result in the parent zio
* as if it had performed the operation itself. Only the first error is
* preserved if there are multiple errors.
*/
static void
vdev_draid_spare_child_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
/*
* IOs are issued to non-writable vdevs in order to keep their
* DTLs accurate. However, we don't want to propagate the
* error into the distributed spare's DTL. When resilvering,
* vdev_draid_need_resilver() will consult the relevant DTL
* to determine if the data is missing and must be repaired.
*/
if (!vdev_writeable(zio->io_vd))
return;
if (pio->io_error == 0)
pio->io_error = zio->io_error;
}
/*
* Returns a valid label nvlist for the distributed spare vdev. This is
* used to bypass the IO pipeline to avoid the complexity of constructing
* a complete label with valid checksum to return when read.
*/
nvlist_t *
vdev_draid_read_config_spare(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
uint64_t guid = vd->vdev_guid;
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
vdev_draid_spare_is_active(vd) ?
POOL_STATE_ACTIVE : POOL_STATE_SPARE);
/* Set the vdev guid based on the vdev list in sav_count. */
for (int i = 0; i < sav->sav_count; i++) {
if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
guid = sav->sav_vdevs[i]->vdev_guid;
break;
}
}
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);
return (nv);
}
/*
* Handle any ioctl requested of the distributed spare. Only flushes
* are supported in which case all children must be flushed.
*/
static int
vdev_draid_spare_ioctl(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
int error = 0;
if (zio->io_cmd == DKIOCFLUSHWRITECACHE) {
for (int c = 0; c < vd->vdev_children; c++) {
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[c], zio->io_offset, zio->io_abd,
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
} else {
error = SET_ERROR(ENOTSUP);
}
return (error);
}
/*
* Initiate an IO to the distributed spare. For normal IOs this entails using
* the zio->io_offset and permutation table to calculate which child dRAID vdev
* is responsible for the data. Then passing along the zio to that child to
* perform the actual IO. The label ranges are not stored on disk and require
* some special handling which is described below.
*/
static void
vdev_draid_spare_io_start(zio_t *zio)
{
vdev_t *cvd = NULL, *vd = zio->io_vd;
vdev_draid_spare_t *vds = vd->vdev_tsd;
uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;
/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
* Nothing to be done here but return failure.
*/
if (vds == NULL) {
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
zio->io_error = vdev_draid_spare_ioctl(zio);
break;
case ZIO_TYPE_WRITE:
if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
/*
* Accept probe IOs and config writers to simulate the
* existence of an on disk label. vdev_label_sync(),
* vdev_uberblock_sync() and vdev_copy_uberblocks()
* skip the distributed spares. This only leaves
* vdev_label_init() which is allowed to succeed to
* avoid adding special cases to the function.
*/
if (zio->io_flags & ZIO_FLAG_PROBE ||
zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
zio->io_error = 0;
} else {
zio->io_error = SET_ERROR(EIO);
}
} else {
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
}
break;
case ZIO_TYPE_READ:
if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
/*
* Accept probe IOs to simulate the existence of a
* label. vdev_label_read_config() bypasses the
* pipeline to read the label configuration and
* vdev_uberblock_load() skips distributed spares
* when attempting to locate the best uberblock.
*/
if (zio->io_flags & ZIO_FLAG_PROBE) {
zio->io_error = 0;
} else {
zio->io_error = SET_ERROR(EIO);
}
} else {
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL || !vdev_readable(cvd)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
}
break;
case ZIO_TYPE_TRIM:
/* The vdev label ranges are never trimmed */
ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL || !cvd->vdev_has_trim) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
zio_execute(zio);
}
static void
vdev_draid_spare_io_done(zio_t *zio)
{
(void) zio;
}
/*
* Lookup the full spare config in spa->spa_spares.sav_config and
* return the top_guid and spare_id for the named spare.
*/
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
uint64_t *spare_idp)
{
nvlist_t **spares;
uint_t nspares;
int error;
if ((spa->spa_spares.sav_config == NULL) ||
(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
return (SET_ERROR(ENOENT));
}
char *spare_name;
error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
if (error != 0)
return (SET_ERROR(EINVAL));
for (int i = 0; i < nspares; i++) {
nvlist_t *spare = spares[i];
uint64_t top_guid, spare_id;
char *type, *path;
/* Skip non-distributed spares */
error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
continue;
/* Skip spares with the wrong name */
error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
if (error != 0 || strcmp(path, spare_name) != 0)
continue;
/* Found the matching spare */
error = nvlist_lookup_uint64(spare,
ZPOOL_CONFIG_TOP_GUID, &top_guid);
if (error == 0) {
error = nvlist_lookup_uint64(spare,
ZPOOL_CONFIG_SPARE_ID, &spare_id);
}
if (error != 0) {
return (SET_ERROR(EINVAL));
} else {
*top_guidp = top_guid;
*spare_idp = spare_id;
return (0);
}
}
return (SET_ERROR(ENOENT));
}
/*
* Initialize private dRAID spare specific fields from the nvlist.
*/
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
vdev_draid_spare_t *vds;
uint64_t top_guid = 0;
uint64_t spare_id;
/*
* In the normal case check the list of spares stored in the spa
* to look up the top_guid and spare_id for the provided spare config.
* When creating a new pool or adding vdevs the spare list is not
* yet populated and the values are provided in the passed config.
*/
if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
&spare_id) != 0)
return (SET_ERROR(EINVAL));
}
vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
vds->vds_draid_vdev = NULL;
vds->vds_top_guid = top_guid;
vds->vds_spare_id = spare_id;
*tsd = vds;
return (0);
}
static void
vdev_draid_spare_fini(vdev_t *vd)
{
kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}
static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}
vdev_ops_t vdev_draid_spare_ops = {
.vdev_op_init = vdev_draid_spare_init,
.vdev_op_fini = vdev_draid_spare_fini,
.vdev_op_open = vdev_draid_spare_open,
.vdev_op_close = vdev_draid_spare_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_draid_spare_io_start,
.vdev_op_io_done = vdev_draid_spare_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_draid_spare_config_generate,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
.vdev_op_leaf = B_TRUE,
};
diff --git a/sys/contrib/openzfs/module/zfs/vdev_mirror.c b/sys/contrib/openzfs/module/zfs/vdev_mirror.c
index 3879de680453..f9a01c9f53f4 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_mirror.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_mirror.c
@@ -1,1034 +1,1040 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
/*
* Vdev mirror kstats
*/
static kstat_t *mirror_ksp = NULL;
typedef struct mirror_stats {
kstat_named_t vdev_mirror_stat_rotating_linear;
kstat_named_t vdev_mirror_stat_rotating_offset;
kstat_named_t vdev_mirror_stat_rotating_seek;
kstat_named_t vdev_mirror_stat_non_rotating_linear;
kstat_named_t vdev_mirror_stat_non_rotating_seek;
kstat_named_t vdev_mirror_stat_preferred_found;
kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;
static mirror_stats_t mirror_stats = {
/* New I/O follows directly the last I/O */
{ "rotating_linear", KSTAT_DATA_UINT64 },
/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
{ "rotating_offset", KSTAT_DATA_UINT64 },
/* New I/O requires random seek */
{ "rotating_seek", KSTAT_DATA_UINT64 },
/* New I/O follows directly the last I/O (nonrot) */
{ "non_rotating_linear", KSTAT_DATA_UINT64 },
/* New I/O requires random seek (nonrot) */
{ "non_rotating_seek", KSTAT_DATA_UINT64 },
/* Preferred child vdev found */
{ "preferred_found", KSTAT_DATA_UINT64 },
/* Preferred child vdev not found or equal load */
{ "preferred_not_found", KSTAT_DATA_UINT64 },
};
#define MIRROR_STAT(stat) (mirror_stats.stat.value.ui64)
#define MIRROR_INCR(stat, val) atomic_add_64(&MIRROR_STAT(stat), val)
#define MIRROR_BUMP(stat) MIRROR_INCR(stat, 1)
void
vdev_mirror_stat_init(void)
{
mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
"misc", KSTAT_TYPE_NAMED,
sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (mirror_ksp != NULL) {
mirror_ksp->ks_data = &mirror_stats;
kstat_install(mirror_ksp);
}
}
void
vdev_mirror_stat_fini(void)
{
if (mirror_ksp != NULL) {
kstat_delete(mirror_ksp);
mirror_ksp = NULL;
}
}
/*
* Virtual device vector for mirroring.
*/
typedef struct mirror_child {
vdev_t *mc_vd;
abd_t *mc_abd;
uint64_t mc_offset;
int mc_error;
int mc_load;
uint8_t mc_tried;
uint8_t mc_skipped;
uint8_t mc_speculative;
uint8_t mc_rebuilding;
} mirror_child_t;
typedef struct mirror_map {
int *mm_preferred;
int mm_preferred_cnt;
int mm_children;
boolean_t mm_resilvering;
boolean_t mm_rebuilding;
boolean_t mm_root;
mirror_child_t mm_child[];
} mirror_map_t;
static const int vdev_mirror_shift = 21;
/*
* The load configuration settings below are tuned by default for
* the case where all devices are of the same rotational type.
*
* If there is a mixture of rotating and non-rotating media, setting
* zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
* as it will direct more reads to the non-rotating vdevs which are more likely
* to have a higher performance.
*/
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;
/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;
static inline size_t
vdev_mirror_map_size(int children)
{
return (offsetof(mirror_map_t, mm_child[children]) +
sizeof (int) * children);
}
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
mirror_map_t *mm;
mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
mm->mm_children = children;
mm->mm_resilvering = resilvering;
mm->mm_root = root;
mm->mm_preferred = (int *)((uintptr_t)mm +
offsetof(mirror_map_t, mm_child[children]));
return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
.vsd_free = vdev_mirror_map_free,
};
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
uint64_t last_offset;
int64_t offset_diff;
int load;
/* All DVAs have equal weight at the root. */
if (mm->mm_root)
return (INT_MAX);
/*
* We don't return INT_MAX if the device is resilvering (i.e.
* vdev_resilver_txg != 0) because, when tested, overall performance
* was slightly worse when resilvering than without.
*/
/* Fix zio_offset for leaf vdevs */
if (vd->vdev_ops->vdev_op_leaf)
zio_offset += VDEV_LABEL_START_SIZE;
/* Standard load based on pending queue length. */
load = vdev_queue_length(vd);
last_offset = vdev_queue_last_offset(vd);
if (vd->vdev_nonrot) {
/* Non-rotating media. */
if (last_offset == zio_offset) {
MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
return (load + zfs_vdev_mirror_non_rotating_inc);
}
/*
* Apply a seek penalty even for non-rotating devices as
* sequential I/O's can be aggregated into fewer operations on
* the device, thus avoiding unnecessary per-command overhead
* and boosting performance.
*/
MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
return (load + zfs_vdev_mirror_non_rotating_seek_inc);
}
/* Rotating media I/O's which directly follow the last I/O. */
if (last_offset == zio_offset) {
MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
return (load + zfs_vdev_mirror_rotating_inc);
}
/*
* Apply half the seek increment to I/O's within seek offset
* of the last I/O issued to this vdev as they should incur less
* of a seek increment.
*/
offset_diff = (int64_t)(last_offset - zio_offset);
if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
}
/* Apply the full seek increment to all other I/O's. */
MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
return (load + zfs_vdev_mirror_rotating_seek_inc);
}
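The load calculation above boils down to the pending queue length plus a seek penalty that depends on whether the media rotates and how far the new offset is from the last one. A standalone sketch of that heuristic, where the queue length and last offset are passed in as hypothetical inputs instead of being read via vdev_queue_length() and vdev_queue_last_offset():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Defaults mirrored from the tunables above. */
static int rotating_inc = 0;
static int rotating_seek_inc = 5;
static int rotating_seek_offset = 1 * 1024 * 1024;
static int non_rotating_inc = 0;
static int non_rotating_seek_inc = 1;

static int
mirror_load(int queue_len, uint64_t last_offset, uint64_t zio_offset,
    int nonrot)
{
	if (nonrot) {
		if (last_offset == zio_offset)
			return (queue_len + non_rotating_inc);
		return (queue_len + non_rotating_seek_inc);
	}
	if (last_offset == zio_offset)
		return (queue_len + rotating_inc);
	/* Half the seek penalty for I/Os within the seek offset window. */
	if (llabs((long long)(last_offset - zio_offset)) <
	    rotating_seek_offset)
		return (queue_len + rotating_seek_inc / 2);
	return (queue_len + rotating_seek_inc);
}

int
main(void)
{
	printf("hdd linear: %d\n", mirror_load(3, 8192, 8192, 0));
	printf("hdd near:   %d\n", mirror_load(3, 8192, 16384, 0));
	printf("hdd seek:   %d\n", mirror_load(3, 8192, 1ULL << 30, 0));
	printf("ssd seek:   %d\n", mirror_load(3, 8192, 1ULL << 30, 1));
	return (0);
}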
static boolean_t
vdev_mirror_rebuilding(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (vdev_mirror_rebuilding(vd->vdev_child[i])) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Avoid inlining the function to keep vdev_mirror_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
mirror_map_t *mm = NULL;
mirror_child_t *mc;
vdev_t *vd = zio->io_vd;
int c;
if (vd == NULL) {
dva_t *dva = zio->io_bp->blk_dva;
spa_t *spa = zio->io_spa;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dva_t dva_copy[SPA_DVAS_PER_BP];
/*
* The sequential scrub code sorts and issues all DVAs
* of a bp separately. Each of these IOs includes all
* original DVA copies so that repairs can be performed
* in the event of an error, but we only actually want
* to check the first DVA since the others will be
* checked by their respective sorted IOs. Only if we
* hit an error will we try all DVAs upon retrying.
*
* Note: This check is safe even if the user switches
* from a legacy scrub to a sequential one in the middle
* of processing, since scn_is_sorted isn't updated until
* all outstanding IOs from the previous scrub pass
* complete.
*/
if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
dsl_scan_scrubbing(spa->spa_dsl_pool) &&
scn->scn_is_sorted) {
c = 1;
} else {
c = BP_GET_NDVAS(zio->io_bp);
}
/*
* If the pool cannot be written to, then infer that some
* DVAs might be invalid or point to vdevs that do not exist.
* We skip them.
*/
if (!spa_writeable(spa)) {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
int j = 0;
for (int i = 0; i < c; i++) {
if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
dva_copy[j++] = dva[i];
}
if (j == 0) {
zio->io_vsd = NULL;
zio->io_error = ENXIO;
return (NULL);
}
if (j < c) {
dva = dva_copy;
c = j;
}
}
mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
if (mc->mc_vd == NULL) {
kmem_free(mm, vdev_mirror_map_size(
mm->mm_children));
zio->io_vsd = NULL;
zio->io_error = ENXIO;
return (NULL);
}
}
} else {
/*
* If we are resilvering, then we should handle scrub reads
* differently; we shouldn't issue them to the resilvering
* device because it might not have those blocks.
*
* We are resilvering iff:
* 1) We are a replacing vdev (ie our name is "replacing-1" or
* "spare-1" or something like that), and
* 2) The pool is currently being resilvered.
*
* We cannot simply check vd->vdev_resilver_txg, because it's
* not set in this path.
*
* Nor can we just check our vdev_ops; there are cases (such as
* when a user types "zpool replace pool odev spare_dev" and
* spare_dev is in the spare list, or when a spare device is
* automatically used to replace a DEGRADED device) when
* resilvering is complete but both the original vdev and the
* spare vdev remain in the pool. That behavior is intentional.
* It helps implement the policy that a spare should be
* automatically removed from the pool after the user replaces
* the device that originally failed.
*
* If a spa load is in progress, then spa_dsl_pool may be
* uninitialized. But we shouldn't be resilvering during a spa
* load anyway.
*/
boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops) &&
spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
B_FALSE);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
mc->mc_vd = vd->vdev_child[c];
mc->mc_offset = zio->io_offset;
if (vdev_mirror_rebuilding(mc->mc_vd))
mm->mm_rebuilding = mc->mc_rebuilding = B_TRUE;
}
}
return (mm);
}
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
int numerrors = 0;
int lasterror = 0;
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
- *physical_ashift = MAX(*physical_ashift,
- cvd->vdev_physical_ashift);
+ }
+ for (int c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+
+ if (cvd->vdev_open_error)
+ continue;
+ *physical_ashift = vdev_best_ashift(*logical_ashift,
+ *physical_ashift, cvd->vdev_physical_ashift);
}
if (numerrors == vd->vdev_children) {
if (vdev_children_are_offline(vd))
vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
else
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_mirror_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
mirror_child_t *mc = zio->io_private;
mc->mc_error = zio->io_error;
mc->mc_tried = 1;
mc->mc_skipped = 0;
}
/*
* Check the other, lower-index DVAs to see if they're on the same
* vdev as the child we picked. If they are, use them since they
* are likely to have been allocated from the primary metaslab in
* use at the time, and hence are more likely to have locality with
* single-copy data.
*/
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
dva_t *dva = zio->io_bp->blk_dva;
mirror_map_t *mm = zio->io_vsd;
int preferred;
int c;
preferred = mm->mm_preferred[p];
for (p--; p >= 0; p--) {
c = mm->mm_preferred[p];
if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
preferred = c;
}
return (preferred);
}
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
int p;
if (mm->mm_root) {
p = random_in_range(mm->mm_preferred_cnt);
return (vdev_mirror_dva_select(zio, p));
}
/*
* To ensure we don't always favour the first matching vdev,
* which could lead to wear leveling issues on SSDs, we
* use the I/O offset as a pseudo random seed into the vdevs
* which have the lowest load.
*/
p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
return (mm->mm_preferred[p]);
}
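The offset-based pick above deterministically spreads reads across the equally loaded candidates: consecutive 2MiB regions map to different children, so no per-IO random number is needed. A small standalone illustration using a hypothetical preferred list:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int mirror_shift = 21;	/* vdev_mirror_shift: 2MiB regions */
	int preferred[] = { 0, 2 };	/* hypothetical lowest-load children */
	int preferred_cnt = 2;

	/* Consecutive 2MiB regions alternate between the candidates. */
	for (uint64_t off = 0; off < (8ULL << 20); off += (2ULL << 20)) {
		int p = (int)((off >> mirror_shift) % preferred_cnt);
		printf("offset %3llu MiB -> child %d\n",
		    (unsigned long long)(off >> 20), preferred[p]);
	}
	return (0);
}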
static boolean_t
vdev_mirror_child_readable(mirror_child_t *mc)
{
vdev_t *vd = mc->mc_vd;
if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
return (vdev_draid_readable(vd, mc->mc_offset));
else
return (vdev_readable(vd));
}
static boolean_t
vdev_mirror_child_missing(mirror_child_t *mc, uint64_t txg, uint64_t size)
{
vdev_t *vd = mc->mc_vd;
if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
return (vdev_draid_missing(vd, mc->mc_offset, txg, size));
else
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Try to find a vdev whose DTL doesn't contain the block we want to read
* preferring vdevs based on determined load. If we can't, try the read on
* any vdev we haven't already tried.
*
* Distributed spares are an exception to the above load rule. They are
* always preferred in order to detect gaps in the distributed spare which
* are created when another disk in the dRAID fails. In order to restore
* redundancy those gaps must be read to trigger the required repair IO.
*/
static int
vdev_mirror_child_select(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
uint64_t txg = zio->io_txg;
int c, lowest_load;
ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
lowest_load = INT_MAX;
mm->mm_preferred_cnt = 0;
for (c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc;
mc = &mm->mm_child[c];
if (mc->mc_tried || mc->mc_skipped)
continue;
if (mc->mc_vd == NULL ||
!vdev_mirror_child_readable(mc)) {
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1; /* don't even try */
mc->mc_skipped = 1;
continue;
}
if (vdev_mirror_child_missing(mc, txg, 1)) {
mc->mc_error = SET_ERROR(ESTALE);
mc->mc_skipped = 1;
mc->mc_speculative = 1;
continue;
}
if (mc->mc_vd->vdev_ops == &vdev_draid_spare_ops) {
mm->mm_preferred[0] = c;
mm->mm_preferred_cnt = 1;
break;
}
mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
if (mc->mc_load > lowest_load)
continue;
if (mc->mc_load < lowest_load) {
lowest_load = mc->mc_load;
mm->mm_preferred_cnt = 0;
}
mm->mm_preferred[mm->mm_preferred_cnt] = c;
mm->mm_preferred_cnt++;
}
if (mm->mm_preferred_cnt == 1) {
MIRROR_BUMP(vdev_mirror_stat_preferred_found);
return (mm->mm_preferred[0]);
}
if (mm->mm_preferred_cnt > 1) {
MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
return (vdev_mirror_preferred_child_randomize(zio));
}
/*
* Every device is either missing or has this txg in its DTL.
* Look for any child we haven't already tried before giving up.
*/
for (c = 0; c < mm->mm_children; c++) {
if (!mm->mm_child[c].mc_tried)
return (c);
}
/*
* Every child failed. There's no place left to look.
*/
return (-1);
}
static void
vdev_mirror_io_start(zio_t *zio)
{
mirror_map_t *mm;
mirror_child_t *mc;
int c, children;
mm = vdev_mirror_map_init(zio);
zio->io_vsd = mm;
zio->io_vsd_ops = &vdev_mirror_vsd_ops;
if (mm == NULL) {
ASSERT(!spa_trust_config(zio->io_spa));
ASSERT(zio->io_type == ZIO_TYPE_READ);
zio_execute(zio);
return;
}
if (zio->io_type == ZIO_TYPE_READ) {
if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
/*
* For scrubbing reads we need to issue reads to all
* children. One child can reuse the parent buffer, but
* for others we have to allocate separate ones to
* verify checksums if io_bp is non-NULL, or compare
* them in vdev_mirror_io_done() otherwise.
*/
boolean_t first = B_TRUE;
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
/* Don't issue ZIOs to offline children */
if (!vdev_mirror_child_readable(mc)) {
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1;
mc->mc_skipped = 1;
continue;
}
mc->mc_abd = first ? zio->io_abd :
abd_alloc_sametype(zio->io_abd,
zio->io_size);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, mc->mc_abd,
zio->io_size, zio->io_type,
zio->io_priority, 0,
vdev_mirror_child_done, mc));
first = B_FALSE;
}
zio_execute(zio);
return;
}
/*
* For normal reads just pick one child.
*/
c = vdev_mirror_child_select(zio);
children = (c >= 0);
} else {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
/*
* Writes go to all children.
*/
c = 0;
children = mm->mm_children;
}
while (children--) {
mc = &mm->mm_child[c];
c++;
/*
* When sequentially resilvering only issue write repair
* IOs to the vdev which is being rebuilt since performance
* is limited by the slowest child. This is an issue for
* faster replacement devices such as distributed spares.
*/
if ((zio->io_priority == ZIO_PRIORITY_REBUILD) &&
(zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SCRUB) &&
mm->mm_rebuilding && !mc->mc_rebuilding) {
continue;
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_mirror_child_done, mc));
}
zio_execute(zio);
}
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
int error[2] = { 0, 0 };
for (int c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc = &mm->mm_child[c];
int s = mc->mc_speculative;
error[s] = zio_worst_error(error[s], mc->mc_error);
}
return (error[0] ? error[0] : error[1]);
}
static void
vdev_mirror_io_done(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
mirror_child_t *mc;
int c;
int good_copies = 0;
int unexpected_errors = 0;
int last_good_copy = -1;
if (mm == NULL)
return;
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
if (mc->mc_error) {
if (!mc->mc_skipped)
unexpected_errors++;
} else if (mc->mc_tried) {
last_good_copy = c;
good_copies++;
}
}
if (zio->io_type == ZIO_TYPE_WRITE) {
/*
* XXX -- for now, treat partial writes as success.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
if (good_copies != mm->mm_children) {
/*
* Always require at least one good copy.
*
* For ditto blocks (io_vd == NULL), require
* all copies to be good.
*
* XXX -- for replacing vdevs, there's no great answer.
* If the old device is really dead, we may not even
* be able to access it -- so we only want to
* require good writes to the new device. But if
* the new device turns out to be flaky, we want
* to be able to detach it -- which requires all
* writes to the old device to have succeeded.
*/
if (good_copies == 0 || zio->io_vd == NULL)
zio->io_error = vdev_mirror_worst_error(mm);
}
return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ);
/*
* If we don't have a good copy yet, keep trying other children.
*/
if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
ASSERT(c >= 0 && c < mm->mm_children);
mc = &mm->mm_child[c];
zio_vdev_io_redone(zio);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
ZIO_TYPE_READ, zio->io_priority, 0,
vdev_mirror_child_done, mc));
return;
}
if (zio->io_flags & ZIO_FLAG_SCRUB && !mm->mm_resilvering) {
abd_t *best_abd = NULL;
if (last_good_copy >= 0)
best_abd = mm->mm_child[last_good_copy].mc_abd;
/*
* If we're scrubbing but don't have a BP available (because
* this vdev is under a raidz or draid vdev) then the best we
* can do is compare all of the copies read. If they're not
* identical then return a checksum error and the most likely
* correct data. The raidz code will issue a repair I/O if
* possible.
*/
if (zio->io_bp == NULL) {
ASSERT(zio->io_vd->vdev_ops == &vdev_replacing_ops ||
zio->io_vd->vdev_ops == &vdev_spare_ops);
abd_t *pref_abd = NULL;
for (c = 0; c < last_good_copy; c++) {
mc = &mm->mm_child[c];
if (mc->mc_error || !mc->mc_tried)
continue;
if (abd_cmp(mc->mc_abd, best_abd) != 0)
zio->io_error = SET_ERROR(ECKSUM);
/*
* The distributed spare is always preferred
* by vdev_mirror_child_select() so it's
* considered to be the best candidate.
*/
if (pref_abd == NULL &&
mc->mc_vd->vdev_ops ==
&vdev_draid_spare_ops)
pref_abd = mc->mc_abd;
/*
* In the absence of a preferred copy, use
* the parent pointer to avoid a memory copy.
*/
if (mc->mc_abd == zio->io_abd)
best_abd = mc->mc_abd;
}
if (pref_abd)
best_abd = pref_abd;
} else {
/*
* If we have a BP available, then checksums are
* already verified and we just need a buffer
* with valid data, preferring the parent one to
* avoid a memory copy.
*/
for (c = 0; c < last_good_copy; c++) {
mc = &mm->mm_child[c];
if (mc->mc_error || !mc->mc_tried)
continue;
if (mc->mc_abd == zio->io_abd) {
best_abd = mc->mc_abd;
break;
}
}
}
if (best_abd && best_abd != zio->io_abd)
abd_copy(zio->io_abd, best_abd, zio->io_size);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
if (mc->mc_abd != zio->io_abd)
abd_free(mc->mc_abd);
mc->mc_abd = NULL;
}
}
if (good_copies == 0) {
zio->io_error = vdev_mirror_worst_error(mm);
ASSERT(zio->io_error != 0);
}
if (good_copies && spa_writeable(zio->io_spa) &&
(unexpected_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER) ||
((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (c = 0; c < mm->mm_children; c++) {
/*
* Don't rewrite known good children.
* Not only is it unnecessary, it could
* actually be harmful: if the system lost
* power while rewriting the only good copy,
* there would be no good copies left!
*/
mc = &mm->mm_child[c];
if (mc->mc_error == 0) {
vdev_ops_t *ops = mc->mc_vd->vdev_ops;
if (mc->mc_tried)
continue;
/*
* We didn't try this child. We need to
* repair it if:
* 1. it's a scrub (in which case we have
* tried everything that was healthy)
* - or -
* 2. it's an indirect or distributed spare
* vdev (in which case it could point to any
* other vdev, which might have a bad DTL)
* - or -
* 3. the DTL indicates that this data is
* missing from this vdev
*/
if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
ops != &vdev_indirect_ops &&
ops != &vdev_draid_spare_ops &&
!vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
zio->io_txg, 1))
continue;
mc->mc_error = SET_ERROR(ESTALE);
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset,
zio->io_abd, zio->io_size, ZIO_TYPE_WRITE,
zio->io_priority == ZIO_PRIORITY_REBUILD ?
ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
if (faulted == vd->vdev_children) {
if (vdev_children_are_offline(vd)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
VDEV_AUX_CHILDREN_OFFLINE);
} else {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
}
} else if (degraded + faulted != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
} else {
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
}
/*
* Return the maximum asize for a rebuild zio in the provided range.
*/
static uint64_t
vdev_mirror_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t max_segment)
{
(void) start;
uint64_t psize = MIN(P2ROUNDUP(max_segment, 1 << vd->vdev_ashift),
SPA_MAXBLOCKSIZE);
return (MIN(asize, vdev_psize_to_asize(vd, psize)));
}
vdev_ops_t vdev_mirror_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_mirror_open,
.vdev_op_close = vdev_mirror_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_mirror_io_start,
.vdev_op_io_done = vdev_mirror_io_done,
.vdev_op_state_change = vdev_mirror_state_change,
.vdev_op_need_resilver = vdev_default_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_MIRROR, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
vdev_ops_t vdev_replacing_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_mirror_open,
.vdev_op_close = vdev_mirror_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_mirror_io_start,
.vdev_op_io_done = vdev_mirror_io_done,
.vdev_op_state_change = vdev_mirror_state_change,
.vdev_op_need_resilver = vdev_default_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_REPLACING, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
vdev_ops_t vdev_spare_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_mirror_open,
.vdev_op_close = vdev_mirror_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_mirror_io_start,
.vdev_op_io_done = vdev_mirror_io_done,
.vdev_op_state_change = vdev_mirror_state_change,
.vdev_op_need_resilver = vdev_default_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_SPARE, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, INT, ZMOD_RW,
"Rotating media load increment for non-seeking I/Os");
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_inc, INT,
ZMOD_RW, "Rotating media load increment for seeking I/Os");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_offset, INT,
ZMOD_RW,
"Offset in bytes from the last I/O which triggers "
"a reduced rotating media seek increment");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_inc, INT,
ZMOD_RW, "Non-rotating media load increment for non-seeking I/Os");
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_seek_inc, INT,
ZMOD_RW, "Non-rotating media load increment for seeking I/Os");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index b4daf642ed20..5a44983e551d 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -1,2667 +1,2673 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev_draid.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_raidz_io_verify() */
#endif
/*
* Virtual device vector for RAID-Z.
*
* This vdev supports single, double, and triple parity. For single parity,
* we use a simple XOR of all the data columns. For double or triple parity,
* we use a special case of Reed-Solomon coding. This extends the
* technique described in "The mathematics of RAID-6" by H. Peter Anvin by
* drawing on the system described in "A Tutorial on Reed-Solomon Coding for
* Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
* former is also based. The latter is designed to provide higher performance
* for writes.
*
* Note that the Plank paper claimed to support arbitrary N+M, but was then
* amended six years later identifying a critical flaw that invalidates its
* claims. Nevertheless, the technique can be adapted to work for up to
* triple parity. For additional parity, the amendment "Note: Correction to
* the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
* is viable, but the additional complexity means that write performance will
* suffer.
*
* All of the methods above operate on a Galois field, defined over the
* integers mod 2^N. In our case we choose N=8 for GF(2^8) so that all elements
* can be expressed with a single byte. Briefly, the operations on the
* field are defined as follows:
*
* o addition (+) is represented by a bitwise XOR
* o subtraction (-) is therefore identical to addition: A + B = A - B
* o multiplication of A by 2 is defined by the following bitwise expression:
*
* (A * 2)_7 = A_6
* (A * 2)_6 = A_5
* (A * 2)_5 = A_4
* (A * 2)_4 = A_3 + A_7
* (A * 2)_3 = A_2 + A_7
* (A * 2)_2 = A_1 + A_7
* (A * 2)_1 = A_0
* (A * 2)_0 = A_7
*
* In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
* As an aside, this multiplication is derived from the error correcting
* primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
*
* Observe that any number in the field (except for 0) can be expressed as a
* power of 2 -- a generator for the field. We store a table of the powers of
* 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
* be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
* than field addition). The inverse of a field element A (A^-1) is therefore
* A ^ (255 - 1) = A^254.
*
* The up-to-three parity columns, P, Q, R over several data columns,
* D_0, ... D_n-1, can be expressed by field operations:
*
* P = D_0 + D_1 + ... + D_n-2 + D_n-1
* Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
* = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
* R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
* = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
*
* We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
* XOR operation, and 2 and 4 can be computed quickly and generate linearly-
* independent coefficients. (There are no additional coefficients that have
* this property which is why the uncorrected Plank method breaks down.)
*
* See the reconstruction code below for how P, Q and R can be used individually
* or in concert to recover missing data columns.
*/
#define VDEV_RAIDZ_P 0
#define VDEV_RAIDZ_Q 1
#define VDEV_RAIDZ_R 2
#define VDEV_RAIDZ_MUL_2(x) (((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define VDEV_RAIDZ_MUL_4(x) (VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
/*
* We provide a mechanism to perform the field multiplication operation on a
* 64-bit value all at once rather than a byte at a time. This works by
* creating a mask from the top bit in each byte and using that to
* conditionally apply the XOR of 0x1d.
*/
#define VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
(mask) = (x) & 0x8080808080808080ULL; \
(mask) = ((mask) << 1) - ((mask) >> 7); \
(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}
#define VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
VDEV_RAIDZ_64MUL_2((x), mask); \
VDEV_RAIDZ_64MUL_2((x), mask); \
}
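For readers unfamiliar with the field arithmetic described above, here is a minimal userspace sketch (not part of this patch; gf_mul2() and gf_mul2_64() are hypothetical helper names) showing that the byte-wise multiply-by-2 from the header comment and the 64-bit-lane form used by VDEV_RAIDZ_64MUL_2() compute the same values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Multiply one GF(2^8) element by 2 (polynomial x^8 + x^4 + x^3 + x^2 + 1). */
static uint8_t
gf_mul2(uint8_t a)
{
	return ((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
}

/* Multiply all eight byte lanes of a 64-bit word by 2 at once. */
static uint64_t
gf_mul2_64(uint64_t x)
{
	uint64_t mask = x & 0x8080808080808080ULL;

	mask = (mask << 1) - (mask >> 7);
	return (((x << 1) & 0xfefefefefefefefeULL) ^
	    (mask & 0x1d1d1d1d1d1d1d1dULL));
}

int
main(void)
{
	uint64_t w = 0x0123456789abcdefULL;
	uint64_t lanes = gf_mul2_64(w);

	/* Every byte lane must match the scalar result. */
	for (int i = 0; i < 8; i++) {
		uint8_t b = (w >> (i * 8)) & 0xff;
		assert(((lanes >> (i * 8)) & 0xff) == gf_mul2(b));
	}
	printf("2 * %016jx = %016jx in GF(2^8)\n",
	    (uintmax_t)w, (uintmax_t)lanes);
	return (0);
}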
static void
vdev_raidz_row_free(raidz_row_t *rr)
{
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size != 0)
abd_free(rc->rc_abd);
if (rc->rc_orig_data != NULL)
abd_free(rc->rc_orig_data);
}
if (rr->rr_abd_empty != NULL)
abd_free(rr->rr_abd_empty);
kmem_free(rr, offsetof(raidz_row_t, rr_col[rr->rr_scols]));
}
void
vdev_raidz_map_free(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++)
vdev_raidz_row_free(rm->rm_row[i]);
kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_map_free(rm);
}
const zio_vsd_ops_t vdev_raidz_vsd_ops = {
.vsd_free = vdev_raidz_map_free_vsd,
};
static void
vdev_raidz_map_alloc_write(zio_t *zio, raidz_map_t *rm, uint64_t ashift)
{
int c;
int nwrapped = 0;
uint64_t off = 0;
raidz_row_t *rr = rm->rm_row[0];
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(rm->rm_nrows, ==, 1);
/*
* Pad any parity columns with additional space to account for skip
* sectors.
*/
if (rm->rm_skipstart < rr->rr_firstdatacol) {
ASSERT0(rm->rm_skipstart);
nwrapped = rm->rm_nskip;
} else if (rr->rr_scols < (rm->rm_skipstart + rm->rm_nskip)) {
nwrapped =
(rm->rm_skipstart + rm->rm_nskip) % rr->rr_scols;
}
/*
* Optional single skip sectors (rc_size == 0) will be handled in
* vdev_raidz_io_start_write().
*/
int skipped = rr->rr_scols - rr->rr_cols;
/* Allocate buffers for the parity columns */
for (c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* Parity columns will pad out a linear ABD to account for
* the skip sector. A linear ABD is used here because
* parity calculations use the ABD buffer directly to calculate
* parity. This avoids doing a memcpy back to the ABD after the
* parity has been calculated. By issuing the parity column
* with the skip sector we can reduce contention on the child
* VDEV queue locks (vq_lock).
*/
if (c < nwrapped) {
rc->rc_abd = abd_alloc_linear(
rc->rc_size + (1ULL << ashift), B_FALSE);
abd_zero_off(rc->rc_abd, rc->rc_size, 1ULL << ashift);
skipped++;
} else {
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
}
for (off = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
abd_t *abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
/*
* Generate I/O for skip sectors to improve aggregation
* continuity. We will use gang ABDs to reduce contention
* on the child VDEV queue locks (vq_lock) by issuing
* a single I/O that contains the data and skip sector.
*
* It is important to make sure that rc_size is not updated
* even though we are adding a skip sector to the ABD. When
* calculating the parity in vdev_raidz_generate_parity_row()
* rc_size is used to iterate through the ABDs. We cannot
* have zeroed-out skip sectors used for calculating parity
* for raidz, because those same sectors are not used during
* reconstruction.
*/
if (c >= rm->rm_skipstart && skipped < rm->rm_nskip) {
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd, B_TRUE);
abd_gang_add(rc->rc_abd,
abd_get_zeros(1ULL << ashift), B_TRUE);
skipped++;
} else {
rc->rc_abd = abd;
}
off += rc->rc_size;
}
ASSERT3U(off, ==, zio->io_size);
ASSERT3S(skipped, ==, rm->rm_nskip);
}
static void
vdev_raidz_map_alloc_read(zio_t *zio, raidz_map_t *rm)
{
int c;
raidz_row_t *rr = rm->rm_row[0];
ASSERT3U(rm->rm_nrows, ==, 1);
/* Allocate buffers for the parity columns */
for (c = 0; c < rr->rr_firstdatacol; c++)
rr->rr_col[c].rc_abd =
abd_alloc_linear(rr->rr_col[c].rc_size, B_FALSE);
for (uint64_t off = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
off += rc->rc_size;
}
}
/*
* Divides the I/O evenly across all child vdevs; usually, dcols is
* the number of children in the target vdev.
*
* Avoid inlining the function to keep vdev_raidz_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
uint64_t nparity)
{
raidz_row_t *rr;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = zio->io_offset >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = zio->io_size >> ashift;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* The starting byte offset on each child vdev. */
uint64_t o = (b / dcols) << ashift;
uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
raidz_map_t *rm =
kmem_zalloc(offsetof(raidz_map_t, rm_row[1]), KM_SLEEP);
rm->rm_nrows = 1;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
q = s / (dcols - nparity);
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
r = s - q * (dcols - nparity);
/* The number of "big columns" - those which contain remainder data. */
bc = (r == 0 ? 0 : r + nparity);
/*
* The total number of data and parity sectors associated with
* this I/O.
*/
tot = s + nparity * (q + (r == 0 ? 0 : 1));
/*
* acols: The columns that will be accessed.
* scols: The columns that will be accessed or skipped.
*/
if (q == 0) {
/* Our I/O request doesn't span all child vdevs. */
acols = bc;
scols = MIN(dcols, roundup(bc, nparity + 1));
} else {
acols = dcols;
scols = dcols;
}
ASSERT3U(acols, <=, scols);
rr = kmem_alloc(offsetof(raidz_row_t, rr_col[scols]), KM_SLEEP);
rm->rm_row[0] = rr;
rr->rr_cols = acols;
rr->rr_scols = scols;
rr->rr_bigcols = bc;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
rr->rr_firstdatacol = nparity;
rr->rr_abd_empty = NULL;
rr->rr_nempty = 0;
#ifdef ZFS_DEBUG
rr->rr_offset = zio->io_offset;
rr->rr_size = zio->io_size;
#endif
asize = 0;
for (c = 0; c < scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
col = f + c;
coff = o;
if (col >= dcols) {
col -= dcols;
coff += 1ULL << ashift;
}
rc->rc_devidx = col;
rc->rc_offset = coff;
rc->rc_abd = NULL;
rc->rc_orig_data = NULL;
rc->rc_error = 0;
rc->rc_tried = 0;
rc->rc_skipped = 0;
rc->rc_force_repair = 0;
rc->rc_allow_repair = 1;
rc->rc_need_orig_restore = B_FALSE;
if (c >= acols)
rc->rc_size = 0;
else if (c < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rm->rm_nskip = roundup(tot, nparity + 1) - tot;
rm->rm_skipstart = bc;
/*
* If all data stored spans all columns, there's a danger that parity
* will always be on the same device and, since parity isn't read
* during normal operation, that device's I/O bandwidth won't be
* used effectively. We therefore switch the parity every 1MB.
*
* ... at least that was, ostensibly, the theory. As a practical
* matter unless we juggle the parity between all devices evenly, we
* won't see any benefit. Further, occasional writes that aren't a
* multiple of the LCM of the number of children and the minimum
* stripe width are sufficient to avoid pessimal behavior.
* Unfortunately, this decision created an implicit on-disk format
* requirement that we need to support for all eternity, but only
* for single-parity RAID-Z.
*
* If we intend to skip a sector in the zeroth column for padding
* we must make sure to note this swap. We will never intend to
* skip the first column since at least one data and one parity
* column must appear in each row.
*/
ASSERT(rr->rr_cols >= 2);
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
if (rr->rr_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
devidx = rr->rr_col[0].rc_devidx;
o = rr->rr_col[0].rc_offset;
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
rr->rr_col[1].rc_devidx = devidx;
rr->rr_col[1].rc_offset = o;
if (rm->rm_skipstart == 0)
rm->rm_skipstart = 1;
}
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_map_alloc_write(zio, rm, ashift);
} else {
vdev_raidz_map_alloc_read(zio, rm);
}
/* init RAIDZ parity ops */
rm->rm_ops = vdev_raidz_math_get_ops();
return (rm);
}
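As a rough illustration of the geometry computed above, the following minimal standalone sketch (hypothetical layout and helper names, not part of the patch) evaluates q, r, bc, tot and the skip-sector count for a raidz1 vdev with five children and 512-byte sectors:

#include <stdint.h>
#include <stdio.h>

/* roundup() as used in the kernel sources. */
#define	ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int
main(void)
{
	/* Hypothetical raidz1 layout: 5 children, 512-byte sectors. */
	uint64_t dcols = 5, nparity = 1, ashift = 9;
	uint64_t io_size = 5 << ashift;		/* five data sectors */

	uint64_t s = io_size >> ashift;
	uint64_t q = s / (dcols - nparity);	/* full-stripe rows */
	uint64_t r = s - q * (dcols - nparity);	/* leftover data sectors */
	uint64_t bc = (r == 0 ? 0 : r + nparity); /* "big" columns */
	uint64_t tot = s + nparity * (q + (r == 0 ? 0 : 1));
	uint64_t nskip = ROUNDUP(tot, nparity + 1) - tot;

	printf("q=%ju r=%ju bc=%ju tot=%ju nskip=%ju skipstart=%ju\n",
	    (uintmax_t)q, (uintmax_t)r, (uintmax_t)bc,
	    (uintmax_t)tot, (uintmax_t)nskip, (uintmax_t)bc);
	/* Prints: q=1 r=1 bc=2 tot=7 nskip=1 skipstart=2 */
	return (0);
}

Columns 0 (parity) and 1 therefore hold two sectors each, columns 2-4 hold one, and a single skip sector pads the row out to a multiple of nparity + 1.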
struct pqr_struct {
uint64_t *p;
uint64_t *q;
uint64_t *r;
};
static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && !pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++)
*pqr->p ^= *src;
return (0);
}
static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
}
return (0);
}
static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
*pqr->r ^= *src;
}
return (0);
}
static void
vdev_raidz_generate_parity_p(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
} else {
struct pqr_struct pqr = { p, NULL, NULL };
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_p_func, &pqr);
}
}
}
static void
vdev_raidz_generate_parity_pq(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, NULL };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pq_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
}
}
}
}
static void
vdev_raidz_generate_parity_pqr(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t *r = abd_to_buf(rr->rr_col[VDEV_RAIDZ_R].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_R].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
(void) memcpy(r, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
r[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, r };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pqr_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
VDEV_RAIDZ_64MUL_4(r[i], mask);
}
}
}
}
/*
* Generate RAID parity in the first virtual columns according to the number of
* parity columns available.
*/
void
vdev_raidz_generate_parity_row(raidz_map_t *rm, raidz_row_t *rr)
{
ASSERT3U(rr->rr_cols, !=, 0);
/* Generate using the new math implementation */
if (vdev_raidz_math_generate(rm, rr) != RAIDZ_ORIGINAL_IMPL)
return;
switch (rr->rr_firstdatacol) {
case 1:
vdev_raidz_generate_parity_p(rr);
break;
case 2:
vdev_raidz_generate_parity_pq(rr);
break;
case 3:
vdev_raidz_generate_parity_pqr(rr);
break;
default:
cmn_err(CE_PANIC, "invalid RAID-Z configuration");
}
}
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_generate_parity_row(rm, rr);
}
}
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
(void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]);
for (int i = 0; i < cnt; i++) {
dst[i] ^= src[i];
}
return (0);
}
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
void *private)
{
(void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, src++) {
VDEV_RAIDZ_64MUL_2(*dst, mask);
*dst ^= *src;
}
return (0);
}
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
(void) private;
uint64_t *dst = buf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++) {
/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
VDEV_RAIDZ_64MUL_2(*dst, mask);
}
return (0);
}
struct reconst_q_struct {
uint64_t *q;
int exp;
};
static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
struct reconst_q_struct *rq = private;
uint64_t *dst = buf;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, rq->q++) {
int j;
uint8_t *b;
*dst ^= *rq->q;
for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
*b = vdev_raidz_exp2(*b, rq->exp);
}
}
return (0);
}
struct reconst_pq_struct {
uint8_t *p;
uint8_t *q;
uint8_t *pxy;
uint8_t *qxy;
int aexp;
int bexp;
};
static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
uint8_t *yd = ybuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
*yd = *rpq->p ^ *rpq->pxy ^ *xd;
}
return (0);
}
static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
/* same operation as vdev_raidz_reconst_pq_func() on xd */
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
}
return (0);
}
static void
vdev_raidz_reconstruct_p(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
abd_t *dst, *src;
ASSERT3U(ntgts, ==, 1);
ASSERT3U(x, >=, rr->rr_firstdatacol);
ASSERT3U(x, <, rr->rr_cols);
ASSERT3U(rr->rr_col[x].rc_size, <=, rr->rr_col[VDEV_RAIDZ_P].rc_size);
src = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
dst = rr->rr_col[x].rc_abd;
abd_copy_from_buf(dst, abd_to_buf(src), rr->rr_col[x].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
if (c == x)
continue;
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_p_func, NULL);
}
}
static void
vdev_raidz_reconstruct_q(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
int c, exp;
abd_t *dst, *src;
ASSERT(ntgts == 1);
ASSERT(rr->rr_col[x].rc_size <= rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = (c == x) ? 0 : MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
dst = rr->rr_col[x].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy(dst, src, size);
if (rr->rr_col[x].rc_size > size) {
abd_zero_off(dst, size,
rr->rr_col[x].rc_size - size);
}
} else {
ASSERT3U(size, <=, rr->rr_col[x].rc_size);
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_q_pre_func, NULL);
(void) abd_iterate_func(dst,
size, rr->rr_col[x].rc_size - size,
vdev_raidz_reconst_q_pre_tail_func, NULL);
}
}
src = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
dst = rr->rr_col[x].rc_abd;
exp = 255 - (rr->rr_cols - 1 - x);
struct reconst_q_struct rq = { abd_to_buf(src), exp };
(void) abd_iterate_func(dst, 0, rr->rr_col[x].rc_size,
vdev_raidz_reconst_q_post_func, &rq);
}
static void
vdev_raidz_reconstruct_pq(raidz_row_t *rr, int *tgts, int ntgts)
{
uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
abd_t *pdata, *qdata;
uint64_t xsize, ysize;
int x = tgts[0];
int y = tgts[1];
abd_t *xd, *yd;
ASSERT(ntgts == 2);
ASSERT(x < y);
ASSERT(x >= rr->rr_firstdatacol);
ASSERT(y < rr->rr_cols);
ASSERT(rr->rr_col[x].rc_size >= rr->rr_col[y].rc_size);
/*
* Move the parity data aside -- we're going to compute parity as
* though columns x and y were full of zeros -- Pxy and Qxy. We want to
* reuse the parity generation mechanism without trashing the actual
* parity so we make those columns appear to be full of zeros by
* setting their lengths to zero.
*/
pdata = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
qdata = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
xsize = rr->rr_col[x].rc_size;
ysize = rr->rr_col[y].rc_size;
rr->rr_col[VDEV_RAIDZ_P].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
rr->rr_col[VDEV_RAIDZ_Q].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
rr->rr_col[x].rc_size = 0;
rr->rr_col[y].rc_size = 0;
vdev_raidz_generate_parity_pq(rr);
rr->rr_col[x].rc_size = xsize;
rr->rr_col[y].rc_size = ysize;
p = abd_to_buf(pdata);
q = abd_to_buf(qdata);
pxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
qxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
xd = rr->rr_col[x].rc_abd;
yd = rr->rr_col[y].rc_abd;
/*
* We now have:
* Pxy = P + D_x + D_y
* Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
*
* We can then solve for D_x:
* D_x = A * (P + Pxy) + B * (Q + Qxy)
* where
* A = 2^(x - y) * (2^(x - y) + 1)^-1
* B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
*
* With D_x in hand, we can easily solve for D_y:
* D_y = P + Pxy + D_x
*/
a = vdev_raidz_pow2[255 + x - y];
b = vdev_raidz_pow2[255 - (rr->rr_cols - 1 - x)];
tmp = 255 - vdev_raidz_log2[a ^ 1];
aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];
ASSERT3U(xsize, >=, ysize);
struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };
(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
vdev_raidz_reconst_pq_func, &rpq);
(void) abd_iterate_func(xd, ysize, xsize - ysize,
vdev_raidz_reconst_pq_tail_func, &rpq);
abd_free(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
abd_free(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
/*
* Restore the saved parity data.
*/
rr->rr_col[VDEV_RAIDZ_P].rc_abd = pdata;
rr->rr_col[VDEV_RAIDZ_Q].rc_abd = qdata;
}
/*
* In the general case of reconstruction, we must solve the system of linear
* equations defined by the coefficients used to generate parity as well as
* the contents of the data and parity disks. This can be expressed with
* vectors for the original data (D) and the actual data (d) and parity (p)
* and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
*
* __ __ __ __
* | | __ __ | p_0 |
* | V | | D_0 | | p_m-1 |
* | | x | : | = | d_0 |
* | I | | D_n-1 | | : |
* | | ~~ ~~ | d_n-1 |
* ~~ ~~ ~~ ~~
*
* I is simply a square identity matrix of size n, and V is a Vandermonde
* matrix defined by the coefficients we chose for the various parity columns
* (1, 2, 4). Note that these values were chosen for simplicity, for speedy
* computation, and for linear separability.
*
* __ __ __ __
* | 1 .. 1 1 1 | | p_0 |
* | 2^n-1 .. 4 2 1 | __ __ | : |
* | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 |
* | 1 .. 0 0 0 | | D_1 | | d_0 |
* | 0 .. 0 0 0 | x | D_2 | = | d_1 |
* | : : : : | | : | | d_2 |
* | 0 .. 1 0 0 | | D_n-1 | | : |
* | 0 .. 0 1 0 | ~~ ~~ | : |
* | 0 .. 0 0 1 | | d_n-1 |
* ~~ ~~ ~~ ~~
*
* Note that I, V, d, and p are known. To compute D, we must invert the
* matrix and use the known data and parity values to reconstruct the unknown
* data values. We begin by removing the rows in V|I and d|p that correspond
* to failed or missing columns; we then make V|I square (n x n) and d|p
* sized n by removing rows corresponding to unused parity from the bottom up
* to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
* using Gauss-Jordan elimination. In the example below we use m=3 parity
* columns, n=8 data columns, with errors in d_1, d_2, and p_1:
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks
* | 19 205 116 29 64 16 4 1 | / /
* | 1 0 0 0 0 0 0 0 | / /
* | 0 1 0 0 0 0 0 0 | <--' /
* (V|I) = | 0 0 1 0 0 0 0 0 | <---'
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 |
* | 19 205 116 29 64 16 4 1 |
* | 1 0 0 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 |
* (V|I)' = | 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
* have carefully chosen the seed values 1, 2, and 4 to ensure that this
* matrix is not singular.
* __ __
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 0 0 1 0 0 0 0 0 |
* | 167 100 5 41 159 169 217 208 |
* | 166 100 4 40 158 168 216 209 |
* (V|I)'^-1 = | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
* of the missing data.
*
* As is apparent from the example above, the only non-trivial rows in the
* inverse matrix correspond to the data disks that we're trying to
* reconstruct. Indeed, those are the only rows we need as the others would
* only be useful for reconstructing data known or assumed to be valid. For
* that reason, we only build the coefficients in the rows that correspond to
* targeted columns.
*/
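To make the dispersal matrix concrete, the sketch below (a hypothetical userspace program, not the kernel code) regenerates the three non-trivial rows of (V|I) from the example above by raising 2 to the appropriate powers in GF(2^8), which is effectively what vdev_raidz_matrix_init() does with its power/log tables:

#include <stdint.h>
#include <stdio.h>

/* GF(2^8) multiply by 2, as in VDEV_RAIDZ_MUL_2(). */
static uint8_t
gf_mul2(uint8_t a)
{
	return ((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
}

/* Raise 2 to the given power in GF(2^8) by repeated doubling. */
static uint8_t
gf_exp2(unsigned pow)
{
	uint8_t v = 1;

	while (pow-- > 0)
		v = gf_mul2(v);
	return (v);
}

int
main(void)
{
	unsigned n = 8;		/* data columns, as in the example above */

	/* m = 0, 1, 2 selects the P, Q and R rows of V. */
	for (unsigned m = 0; m < 3; m++) {
		for (unsigned j = 0; j < n; j++)
			printf("%4u", (unsigned)gf_exp2(m * (n - 1 - j)));
		printf("\n");
	}
	/*
	 * Prints the first three rows of (V|I) from the example:
	 *    1   1   1   1   1   1   1   1
	 *  128  64  32  16   8   4   2   1
	 *   19 205 116  29  64  16   4   1
	 */
	return (0);
}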
static void
vdev_raidz_matrix_init(raidz_row_t *rr, int n, int nmap, int *map,
uint8_t **rows)
{
int i, j;
int pow;
ASSERT(n == rr->rr_cols - rr->rr_firstdatacol);
/*
* Fill in the missing rows of interest.
*/
for (i = 0; i < nmap; i++) {
ASSERT3S(0, <=, map[i]);
ASSERT3S(map[i], <=, 2);
pow = map[i] * n;
if (pow > 255)
pow -= 255;
ASSERT(pow <= 255);
for (j = 0; j < n; j++) {
pow -= map[i];
if (pow < 0)
pow += 255;
rows[i][j] = vdev_raidz_pow2[pow];
}
}
}
static void
vdev_raidz_matrix_invert(raidz_row_t *rr, int n, int nmissing, int *missing,
uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
int i, j, ii, jj;
uint8_t log;
/*
* Assert that the first nmissing entries from the array of used
* columns correspond to parity columns and that subsequent entries
* correspond to data columns.
*/
for (i = 0; i < nmissing; i++) {
ASSERT3S(used[i], <, rr->rr_firstdatacol);
}
for (; i < n; i++) {
ASSERT3S(used[i], >=, rr->rr_firstdatacol);
}
/*
* First initialize the storage where we'll compute the inverse rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
invrows[i][j] = (i == j) ? 1 : 0;
}
}
/*
* Subtract all trivial rows from the rows of consequence.
*/
for (i = 0; i < nmissing; i++) {
for (j = nmissing; j < n; j++) {
ASSERT3U(used[j], >=, rr->rr_firstdatacol);
jj = used[j] - rr->rr_firstdatacol;
ASSERT3S(jj, <, n);
invrows[i][j] = rows[i][jj];
rows[i][jj] = 0;
}
}
/*
* For each of the rows of interest, we must normalize it and subtract
* a multiple of it from the other rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < missing[i]; j++) {
ASSERT0(rows[i][j]);
}
ASSERT3U(rows[i][missing[i]], !=, 0);
/*
* Compute the inverse of the first element and multiply each
* element in the row by that value.
*/
log = 255 - vdev_raidz_log2[rows[i][missing[i]]];
for (j = 0; j < n; j++) {
rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
}
for (ii = 0; ii < nmissing; ii++) {
if (i == ii)
continue;
ASSERT3U(rows[ii][missing[i]], !=, 0);
log = vdev_raidz_log2[rows[ii][missing[i]]];
for (j = 0; j < n; j++) {
rows[ii][j] ^=
vdev_raidz_exp2(rows[i][j], log);
invrows[ii][j] ^=
vdev_raidz_exp2(invrows[i][j], log);
}
}
}
/*
* Verify that the data left in the rows properly forms part of
* an identity matrix.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
if (j == missing[i]) {
ASSERT3U(rows[i][j], ==, 1);
} else {
ASSERT0(rows[i][j]);
}
}
}
}
static void
vdev_raidz_matrix_reconstruct(raidz_row_t *rr, int n, int nmissing,
int *missing, uint8_t **invrows, const uint8_t *used)
{
int i, j, x, cc, c;
uint8_t *src;
uint64_t ccount;
uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
uint8_t log = 0;
uint8_t val;
int ll;
uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
psize = sizeof (invlog[0][0]) * n * nmissing;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing; i++) {
invlog[i] = pp;
pp += n;
}
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
ASSERT3U(invrows[i][j], !=, 0);
invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
}
}
for (i = 0; i < n; i++) {
c = used[i];
ASSERT3U(c, <, rr->rr_cols);
ccount = rr->rr_col[c].rc_size;
ASSERT(ccount >= rr->rr_col[missing[0]].rc_size || i > 0);
if (ccount == 0)
continue;
src = abd_to_buf(rr->rr_col[c].rc_abd);
for (j = 0; j < nmissing; j++) {
cc = missing[j] + rr->rr_firstdatacol;
ASSERT3U(cc, >=, rr->rr_firstdatacol);
ASSERT3U(cc, <, rr->rr_cols);
ASSERT3U(cc, !=, c);
dcount[j] = rr->rr_col[cc].rc_size;
if (dcount[j] != 0)
dst[j] = abd_to_buf(rr->rr_col[cc].rc_abd);
}
for (x = 0; x < ccount; x++, src++) {
if (*src != 0)
log = vdev_raidz_log2[*src];
for (cc = 0; cc < nmissing; cc++) {
if (x >= dcount[cc])
continue;
if (*src == 0) {
val = 0;
} else {
if ((ll = log + invlog[cc][i]) >= 255)
ll -= 255;
val = vdev_raidz_pow2[ll];
}
if (i == 0)
dst[cc][x] = val;
else
dst[cc][x] ^= val;
}
}
}
kmem_free(p, psize);
}
static void
vdev_raidz_reconstruct_general(raidz_row_t *rr, int *tgts, int ntgts)
{
int n, i, c, t, tt;
int nmissing_rows;
int missing_rows[VDEV_RAIDZ_MAXPARITY];
int parity_map[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
uint8_t *used;
abd_t **bufs = NULL;
/*
* Matrix reconstruction can't use scatter ABDs yet, so we allocate
* temporary linear ABDs if any non-linear ABDs are found.
*/
for (i = rr->rr_firstdatacol; i < rr->rr_cols; i++) {
if (!abd_is_linear(rr->rr_col[i].rc_abd)) {
bufs = kmem_alloc(rr->rr_cols * sizeof (abd_t *),
KM_PUSHPAGE);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
bufs[c] = col->rc_abd;
if (bufs[c] != NULL) {
col->rc_abd = abd_alloc_linear(
col->rc_size, B_TRUE);
abd_copy(col->rc_abd, bufs[c],
col->rc_size);
}
}
break;
}
}
n = rr->rr_cols - rr->rr_firstdatacol;
/*
* Figure out which data columns are missing.
*/
nmissing_rows = 0;
for (t = 0; t < ntgts; t++) {
if (tgts[t] >= rr->rr_firstdatacol) {
missing_rows[nmissing_rows++] =
tgts[t] - rr->rr_firstdatacol;
}
}
/*
* Figure out which parity columns to use to help generate the missing
* data columns.
*/
for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
ASSERT(tt < ntgts);
ASSERT(c < rr->rr_firstdatacol);
/*
* Skip any targeted parity columns.
*/
if (c == tgts[tt]) {
tt++;
continue;
}
parity_map[i] = c;
i++;
}
psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
nmissing_rows * n + sizeof (used[0]) * n;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing_rows; i++) {
rows[i] = pp;
pp += n;
invrows[i] = pp;
pp += n;
}
used = pp;
for (i = 0; i < nmissing_rows; i++) {
used[i] = parity_map[i];
}
for (tt = 0, c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
if (tt < nmissing_rows &&
c == missing_rows[tt] + rr->rr_firstdatacol) {
tt++;
continue;
}
ASSERT3S(i, <, n);
used[i] = c;
i++;
}
/*
* Initialize the interesting rows of the matrix.
*/
vdev_raidz_matrix_init(rr, n, nmissing_rows, parity_map, rows);
/*
* Invert the matrix.
*/
vdev_raidz_matrix_invert(rr, n, nmissing_rows, missing_rows, rows,
invrows, used);
/*
* Reconstruct the missing data using the generated matrix.
*/
vdev_raidz_matrix_reconstruct(rr, n, nmissing_rows, missing_rows,
invrows, used);
kmem_free(p, psize);
/*
* copy back from temporary linear abds and free them
*/
if (bufs) {
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
if (bufs[c] != NULL) {
abd_copy(bufs[c], col->rc_abd, col->rc_size);
abd_free(col->rc_abd);
}
col->rc_abd = bufs[c];
}
kmem_free(bufs, rr->rr_cols * sizeof (abd_t *));
}
}
static void
vdev_raidz_reconstruct_row(raidz_map_t *rm, raidz_row_t *rr,
const int *t, int nt)
{
int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
int ntgts;
int i, c, ret;
int nbadparity, nbaddata;
int parity_valid[VDEV_RAIDZ_MAXPARITY];
nbadparity = rr->rr_firstdatacol;
nbaddata = rr->rr_cols - nbadparity;
ntgts = 0;
for (i = 0, c = 0; c < rr->rr_cols; c++) {
if (c < rr->rr_firstdatacol)
parity_valid[c] = B_FALSE;
if (i < nt && c == t[i]) {
tgts[ntgts++] = c;
i++;
} else if (rr->rr_col[c].rc_error != 0) {
tgts[ntgts++] = c;
} else if (c >= rr->rr_firstdatacol) {
nbaddata--;
} else {
parity_valid[c] = B_TRUE;
nbadparity--;
}
}
ASSERT(ntgts >= nt);
ASSERT(nbaddata >= 0);
ASSERT(nbaddata + nbadparity == ntgts);
dt = &tgts[nbadparity];
/* Reconstruct using the new math implementation */
ret = vdev_raidz_math_reconstruct(rm, rr, parity_valid, dt, nbaddata);
if (ret != RAIDZ_ORIGINAL_IMPL)
return;
/*
* See if we can use any of our optimized reconstruction routines.
*/
switch (nbaddata) {
case 1:
if (parity_valid[VDEV_RAIDZ_P]) {
vdev_raidz_reconstruct_p(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_q(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
case 2:
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_P] &&
parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_pq(rr, dt, 2);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
}
vdev_raidz_reconstruct_general(rr, tgts, ntgts);
}
static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t nparity = vdrz->vd_nparity;
int c;
int lasterror = 0;
int numerrors = 0;
ASSERT(nparity > 0);
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
- *physical_ashift = MAX(*physical_ashift,
- cvd->vdev_physical_ashift);
+ }
+ for (c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+
+ if (cvd->vdev_open_error != 0)
+ continue;
+ *physical_ashift = vdev_best_ashift(*logical_ashift,
+ *physical_ashift, cvd->vdev_physical_ashift);
}
*asize *= vd->vdev_children;
*max_asize *= vd->vdev_children;
if (numerrors > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_raidz_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t asize;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t cols = vdrz->vd_logical_width;
uint64_t nparity = vdrz->vd_nparity;
asize = ((psize - 1) >> ashift) + 1;
asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
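A quick worked example of this size calculation, assuming a hypothetical raidz2 vdev with six children and a 4 KiB sector size; the sketch simply repeats the arithmetic above in userspace:

#include <stdint.h>
#include <stdio.h>

#define	ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int
main(void)
{
	/* Hypothetical raidz2 top-level vdev: 6 children, 4 KiB sectors. */
	uint64_t cols = 6, nparity = 2, ashift = 12;
	uint64_t psize = 128 * 1024;		/* logical block size */

	uint64_t asize = ((psize - 1) >> ashift) + 1;	/* 32 data sectors */
	asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
	asize = ROUNDUP(asize, nparity + 1) << ashift;

	printf("psize %ju -> asize %ju\n", (uintmax_t)psize, (uintmax_t)asize);
	/* Prints: psize 131072 -> asize 196608 (48 sectors: 32 data + 16 parity) */
	return (0);
}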
/*
* The allocatable space for a raidz vdev is N * sizeof(smallest child)
* so each child must provide at least 1/Nth of its asize.
*/
static uint64_t
vdev_raidz_min_asize(vdev_t *vd)
{
return ((vd->vdev_min_asize + vd->vdev_children - 1) /
vd->vdev_children);
}
void
vdev_raidz_child_done(zio_t *zio)
{
raidz_col_t *rc = zio->io_private;
ASSERT3P(rc->rc_abd, !=, NULL);
rc->rc_error = zio->io_error;
rc->rc_tried = 1;
rc->rc_skipped = 0;
}
static void
vdev_raidz_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
vdev_t *tvd = vd->vdev_top;
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_raidz_asize(vd, rr->rr_size);
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
/*
* It would be nice to assert that rs_end is equal
* to rc_offset + rc_size but there might be an
* optional I/O at the end that is not accounted in
* rc_size.
*/
if (physical_rs.rs_end > rc->rc_offset + rc->rc_size) {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset +
rc->rc_size + (1 << tvd->vdev_ashift));
} else {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset + rc->rc_size);
}
#endif
}
static void
vdev_raidz_io_start_write(zio_t *zio, raidz_row_t *rr, uint64_t ashift)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
/* Verify physical to logical translation */
vdev_raidz_io_verify(vd, rr, c);
if (rc->rc_size > 0) {
ASSERT3P(rc->rc_abd, !=, NULL);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd,
abd_get_size(rc->rc_abd), zio->io_type,
zio->io_priority, 0, vdev_raidz_child_done, rc));
} else {
/*
* Generate optional write for skip sector to improve
* aggregation contiguity.
*/
ASSERT3P(rc->rc_abd, ==, NULL);
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, NULL, 1ULL << ashift,
zio->io_type, zio->io_priority,
ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL,
NULL));
}
}
}
static void
vdev_raidz_io_start_read(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
/*
* Iterate over the columns in reverse order so that we hit the parity
* last -- any errors along the way will force us to read the parity.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0)
continue;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_readable(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
}
if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
/*
* Start an IO operation on a RAIDZ VDev
*
* Outline:
* - For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity.
* 3. If the column skips any sectors for padding, create optional dummy
* write zio children for those areas to improve aggregation continuity.
* - For read operations:
* 1. Create child zio read operations to each data column's vdev to read
* the range of data required for zio.
* 2. If this is a scrub or resilver operation, or if any of the data
* vdevs have had errors, then create zio read operations to the parity
* columns' VDevs as well.
*/
static void
vdev_raidz_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_t *tvd = vd->vdev_top;
vdev_raidz_t *vdrz = vd->vdev_tsd;
raidz_map_t *rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift,
vdrz->vd_logical_width, vdrz->vd_nparity);
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
/*
* Until raidz expansion is implemented all maps for a raidz vdev
* contain a single row.
*/
ASSERT3U(rm->rm_nrows, ==, 1);
raidz_row_t *rr = rm->rm_row[0];
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_io_start_write(zio, rr, tvd->vdev_ashift);
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
vdev_raidz_io_start_read(zio, rr);
}
zio_execute(zio);
}
/*
* Report a checksum error for a child of a RAID-Z device.
*/
void
vdev_raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE) &&
zio->io_priority != ZIO_PRIORITY_REBUILD) {
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
&zio->io_bookmark, zio, rc->rc_offset, rc->rc_size,
rc->rc_abd, bad_data, &zbc);
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
}
}
/*
* We keep track of whether or not there were any injected errors, so that
* any ereports we generate can note it.
*/
static int
raidz_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t zbc = {{{0}}};
raidz_map_t *rm = zio->io_vsd;
int ret = zio_checksum_error(zio, &zbc);
if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1;
return (ret);
}
/*
* Generate the parity from the data columns. If we tried and were able to
* read the parity without error, verify that the generated parity matches the
* data we read. If it doesn't, we fire off a checksum error. Return the
* number of such failures.
*/
static int
raidz_parity_verify(zio_t *zio, raidz_row_t *rr)
{
abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int c, ret = 0;
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc;
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
if (checksum == ZIO_CHECKSUM_NOPARITY)
return (ret);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
orig[c] = rc->rc_abd;
ASSERT3U(abd_get_size(rc->rc_abd), ==, rc->rc_size);
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
/*
* Verify any empty sectors are zero filled to ensure the parity
* is calculated correctly even if these non-data sectors are damaged.
*/
if (rr->rr_nempty && rr->rr_abd_empty != NULL)
ret += vdev_draid_map_verify_empty(zio, rr);
/*
* Regenerates parity even for !tried || rc_error != 0 columns. This
* isn't harmful, but it has the side effect of repairing parity we
* didn't realize needed repair (i.e. even if we return 0).
*/
vdev_raidz_generate_parity_row(rm, rr);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
if (abd_cmp(orig[c], rc->rc_abd) != 0) {
vdev_raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
abd_free(orig[c]);
}
return (ret);
}
static int
vdev_raidz_worst_error(raidz_row_t *rr)
{
int error = 0;
for (int c = 0; c < rr->rr_cols; c++)
error = zio_worst_error(error, rr->rr_col[c].rc_error);
return (error);
}
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
if (!rc->rc_skipped)
unexpected_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
if (rc->rc_force_repair)
unexpected_errors++;
}
/*
* If we read more parity disks than were used for
* reconstruction, confirm that the other parity disks produced
* correct data.
*
* Note that we also regenerate parity when resilvering so we
* can write it out to failed devices later.
*/
if (parity_errors + parity_untried <
rr->rr_firstdatacol - data_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER)) {
int n = raidz_parity_verify(zio, rr);
unexpected_errors += n;
}
if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
(unexpected_errors > 0 || (zio->io_flags & ZIO_FLAG_RESILVER))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *vd = zio->io_vd;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!rc->rc_allow_repair) {
continue;
} else if (!rc->rc_force_repair &&
(rc->rc_error == 0 || rc->rc_size == 0)) {
continue;
}
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
ZIO_TYPE_WRITE,
zio->io_priority == ZIO_PRIORITY_REBUILD ?
ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
raidz_restore_orig_data(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
abd_copy(rc->rc_abd,
rc->rc_orig_data, rc->rc_size);
rc->rc_need_orig_restore = B_FALSE;
}
}
}
}
/*
* returns EINVAL if reconstruction of the block will not be possible
* returns ECKSUM if this specific reconstruction failed
* returns 0 on successful reconstruction
*/
static int
raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
{
raidz_map_t *rm = zio->io_vsd;
/* Reconstruct each row */
for (int r = 0; r < rm->rm_nrows; r++) {
raidz_row_t *rr = rm->rm_row[r];
int my_tgts[VDEV_RAIDZ_MAXPARITY]; /* value is child id */
int t = 0;
int dead = 0;
int dead_data = 0;
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
ASSERT0(rc->rc_need_orig_restore);
if (rc->rc_error != 0) {
dead++;
if (c >= nparity)
dead_data++;
continue;
}
if (rc->rc_size == 0)
continue;
for (int lt = 0; lt < ntgts; lt++) {
if (rc->rc_devidx == ltgts[lt]) {
if (rc->rc_orig_data == NULL) {
rc->rc_orig_data =
abd_alloc_linear(
rc->rc_size, B_TRUE);
abd_copy(rc->rc_orig_data,
rc->rc_abd, rc->rc_size);
}
rc->rc_need_orig_restore = B_TRUE;
dead++;
if (c >= nparity)
dead_data++;
my_tgts[t++] = c;
break;
}
}
}
if (dead > nparity) {
/* reconstruction not possible */
raidz_restore_orig_data(rm);
return (EINVAL);
}
if (dead_data > 0)
vdev_raidz_reconstruct_row(rm, rr, my_tgts, t);
}
/* Check for success */
if (raidz_checksum_verify(zio) == 0) {
/* Reconstruction succeeded - report errors */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
/*
* Note: if this is a parity column,
* we don't really know if it's wrong.
* We need to let
* vdev_raidz_io_done_verified() check
* it, and if we set rc_error, it will
* think that it is a "known" error
* that doesn't need to be checked
* or corrected.
*/
if (rc->rc_error == 0 &&
c >= rr->rr_firstdatacol) {
vdev_raidz_checksum_error(zio,
rc, rc->rc_orig_data);
rc->rc_error =
SET_ERROR(ECKSUM);
}
rc->rc_need_orig_restore = B_FALSE;
}
}
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
return (0);
}
/* Reconstruction failed - restore original data */
raidz_restore_orig_data(rm);
return (ECKSUM);
}
/*
* Iterate over all combinations of N bad vdevs and attempt a reconstruction.
* Note that the algorithm below is non-optimal because it doesn't take into
* account how reconstruction is actually performed. For example, with
* triple-parity RAID-Z the reconstruction procedure is the same if column 4
* is targeted as invalid as if columns 1 and 4 are targeted since in both
* cases we'd only use parity information in column 0.
*
* The order that we find the various possible combinations of failed
* disks is dictated by these rules:
* - Examine each "slot" (the "i" in tgts[i])
* - Try to increment this slot (tgts[i] = tgts[i] + 1)
* - if we can't increment because it runs into the next slot,
* reset our slot to the minimum, and examine the next slot
*
* For example, with a 6-wide RAIDZ3, and no known errors (so we have to choose
* 3 columns to reconstruct), we will generate the following sequence:
*
* STATE ACTION
* 0 1 2 special case: skip since these are all parity
* 0 1 3 first slot: reset to 0; middle slot: increment to 2
* 0 2 3 first slot: increment to 1
* 1 2 3 first: reset to 0; middle: reset to 1; last: increment to 4
* 0 1 4 first: reset to 0; middle: increment to 2
* 0 2 4 first: increment to 1
* 1 2 4 first: reset to 0; middle: increment to 3
* 0 3 4 first: increment to 1
* 1 3 4 first: increment to 2
* 2 3 4 first: reset to 0; middle: reset to 1; last: increment to 5
* 0 1 5 first: reset to 0; middle: increment to 2
* 0 2 5 first: increment to 1
* 1 2 5 first: reset to 0; middle: increment to 3
* 0 3 5 first: increment to 1
* 1 3 5 first: increment to 2
* 2 3 5 first: reset to 0; middle: increment to 4
* 0 4 5 first: increment to 1
* 1 4 5 first: increment to 2
* 2 4 5 first: increment to 3
* 3 4 5 done
*
* This strategy works for dRAID but is less efficient when there are a large
* number of child vdevs and therefore permutations to check. Furthermore,
* since the raidz_map_t rows likely do not overlap, reconstruction would be
* possible as long as there are no more than nparity data errors per row.
* These additional permutations are not currently checked but could be as
* a future improvement.
*/
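/*
 * For illustration only: a minimal standalone sketch of the slot
 * enumeration described above (a hypothetical helper, not part of this
 * file), using the same sentinel layout as the ltgts[] array in
 * vdev_raidz_combrec() below and assuming n > k:
 *
 *    static void
 *    enumerate_combinations(int n, int k)
 *    {
 *        int store[k + 2], *tgts = &store[1];
 *
 *        tgts[-1] = -1;          (sentinel for the reset step)
 *        for (int i = 0; i < k; i++)
 *            tgts[i] = i;
 *        tgts[k] = n;            (sentinel bounding the last slot)
 *        for (;;) {
 *            visit(tgts, k);     (e.g. attempt a reconstruction)
 *            for (int t = 0; ; t++) {
 *                tgts[t]++;
 *                if (tgts[t] == n)
 *                    break;      (last slot exhausted)
 *                if (tgts[t] != tgts[t + 1])
 *                    break;      (free spot found, try this combination)
 *                tgts[t] = tgts[t - 1] + 1;
 *            }
 *            if (tgts[k - 1] == n)
 *                break;          (all combinations visited)
 *        }
 *    }
 *
 * With n = 6 and k = 3 this visits exactly the sequence in the table
 * above, finishing with { 3, 4, 5 }.
 */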
static int
vdev_raidz_combrec(zio_t *zio)
{
int nparity = vdev_get_nparity(zio->io_vd);
raidz_map_t *rm = zio->io_vsd;
/* Check if there's enough data to attempt reconstruction. */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
int total_errors = 0;
for (int c = 0; c < rr->rr_cols; c++) {
if (rr->rr_col[c].rc_error)
total_errors++;
}
if (total_errors > nparity)
return (vdev_raidz_worst_error(rr));
}
for (int num_failures = 1; num_failures <= nparity; num_failures++) {
int tstore[VDEV_RAIDZ_MAXPARITY + 2];
int *ltgts = &tstore[1]; /* value is logical child ID */
/* Determine number of logical children, n */
int n = zio->io_vd->vdev_children;
ASSERT3U(num_failures, <=, nparity);
ASSERT3U(num_failures, <=, VDEV_RAIDZ_MAXPARITY);
/* Handle corner cases in combrec logic */
ltgts[-1] = -1;
for (int i = 0; i < num_failures; i++) {
ltgts[i] = i;
}
ltgts[num_failures] = n;
for (;;) {
int err = raidz_reconstruct(zio, ltgts, num_failures,
nparity);
if (err == EINVAL) {
/*
* Reconstruction not possible with this #
* failures; try more failures.
*/
break;
} else if (err == 0)
return (0);
/* Compute next targets to try */
for (int t = 0; ; t++) {
ASSERT3U(t, <, num_failures);
ltgts[t]++;
if (ltgts[t] == n) {
/* try more failures */
ASSERT3U(t, ==, num_failures - 1);
break;
}
ASSERT3U(ltgts[t], <, n);
ASSERT3U(ltgts[t], <=, ltgts[t + 1]);
/*
* If that spot is available, we're done here.
* Try the next combination.
*/
if (ltgts[t] != ltgts[t + 1])
break;
/*
* Otherwise, reset this tgt to the minimum,
* and move on to the next tgt.
*/
ltgts[t] = ltgts[t - 1] + 1;
ASSERT3U(ltgts[t], ==, t);
}
/* Increase the number of failures and keep trying. */
if (ltgts[num_failures - 1] == n)
break;
}
}
return (ECKSUM);
}
void
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
for (uint64_t row = 0; row < rm->rm_nrows; row++) {
raidz_row_t *rr = rm->rm_row[row];
vdev_raidz_reconstruct_row(rm, rr, t, nt);
}
}
/*
* Complete a write IO operation on a RAIDZ VDev
*
* Outline:
* 1. Check for errors on the child IOs.
* 2. Return, setting an error code if too few child VDevs were written
* to reconstruct the data later. Note that partial writes are
* considered successful if they can be reconstructed at all.
*/
static void
vdev_raidz_io_done_write_impl(zio_t *zio, raidz_row_t *rr)
{
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
total_errors++;
}
}
/*
* Treat partial writes as a success. If we couldn't write enough
* columns to reconstruct the data, the I/O failed. Otherwise,
* good enough.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
if (total_errors > rr->rr_firstdatacol) {
zio->io_error = zio_worst_error(zio->io_error,
vdev_raidz_worst_error(rr));
}
}
static void
vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
raidz_row_t *rr)
{
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* If scrubbing and a replacing/sparing child vdev determined
* that not all of its children have an identical copy of the
* data, then clear the error so the column is treated like
* any other read and force a repair to correct the damage.
*/
if (rc->rc_error == ECKSUM) {
ASSERT(zio->io_flags & ZIO_FLAG_SCRUB);
vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
rc->rc_force_repair = 1;
rc->rc_error = 0;
}
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
total_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
}
/*
* If there were data errors and the number of errors we saw was
* correctable -- less than or equal to the number of parity disks read
* -- reconstruct based on the missing data.
*/
if (data_errors != 0 &&
total_errors <= rr->rr_firstdatacol - parity_untried) {
/*
* We either attempt to read all the parity columns or
* none of them. If we didn't try to read parity, we
* wouldn't be here in the correctable case. There must
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
ASSERT(parity_untried == 0);
ASSERT(parity_errors < rr->rr_firstdatacol);
/*
* Identify the data columns that reported an error.
*/
int n = 0;
int tgts[VDEV_RAIDZ_MAXPARITY];
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error != 0) {
ASSERT(n < VDEV_RAIDZ_MAXPARITY);
tgts[n++] = c;
}
}
ASSERT(rr->rr_firstdatacol >= n);
vdev_raidz_reconstruct_row(rm, rr, tgts, n);
}
}
/*
* Return the number of reads issued.
*/
static int
vdev_raidz_read_all(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
int nread = 0;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
/*
* If this row contains empty sectors which are not required
* for a normal read then allocate an ABD for them now so they
* may be read, verified, and any needed repairs performed.
*/
if (rr->rr_nempty && rr->rr_abd_empty == NULL)
vdev_draid_map_alloc_empty(zio, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_tried || rc->rc_size == 0)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx],
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
nread++;
}
return (nread);
}
/*
* We're here because either there were too many errors to even attempt
* reconstruction (total_errors == rm_first_datacol), or vdev_*_combrec()
* failed. In either case, there is enough bad data to prevent reconstruction.
* Start checksum ereports for all children which haven't failed.
*/
static void
vdev_raidz_io_done_unrecoverable(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx];
if (rc->rc_error != 0)
continue;
zio_bad_cksum_t zbc;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
(void) zfs_ereport_start_checksum(zio->io_spa,
cvd, &zio->io_bookmark, zio, rc->rc_offset,
rc->rc_size, &zbc);
mutex_enter(&cvd->vdev_stat_lock);
cvd->vdev_stat.vs_checksum_errors++;
mutex_exit(&cvd->vdev_stat_lock);
}
}
}
void
vdev_raidz_io_done(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_raidz_io_done_write_impl(zio, rm->rm_row[i]);
}
} else {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_reconstruct_known_missing(zio,
rm, rr);
}
if (raidz_checksum_verify(zio) == 0) {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
} else {
/*
* A sequential resilver has no checksum which makes
* combinatorial reconstruction impossible. This code
* path is unreachable since raidz_checksum_verify()
* has no checksum to verify and must succeed.
*/
ASSERT3U(zio->io_priority, !=, ZIO_PRIORITY_REBUILD);
/*
* This isn't a typical situation -- either we got a
* read error or a child silently returned bad data.
* Read every block so we can try again with as much
* data and parity as we can track down. If we've
* already been through once before, all children will
* be marked as tried so we'll proceed to combinatorial
* reconstruction.
*/
int nread = 0;
for (int i = 0; i < rm->rm_nrows; i++) {
nread += vdev_raidz_read_all(zio,
rm->rm_row[i]);
}
if (nread != 0) {
/*
* Normally our stage is VDEV_IO_DONE, but if
* we've already called redone(), it will have
* changed to VDEV_IO_START, in which case we
* don't want to call redone() again.
*/
if (zio->io_stage != ZIO_STAGE_VDEV_IO_START)
zio_vdev_io_redone(zio);
return;
}
zio->io_error = vdev_raidz_combrec(zio);
if (zio->io_error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
vdev_raidz_io_done_unrecoverable(zio);
}
}
}
}
static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
if (faulted > vdrz->vd_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered. The function
* assumes that at least one DTL is dirty which implies that full stripe
* width blocks must be resilvered.
*/
static boolean_t
vdev_raidz_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t dcols = vd->vdev_children;
uint64_t nparity = vdrz->vd_nparity;
uint64_t ashift = vd->vdev_top->vdev_ashift;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = DVA_GET_OFFSET(dva) >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = ((psize - 1) >> ashift) + 1;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* Unreachable by sequential resilver. */
ASSERT3U(phys_birth, !=, TXG_UNKNOWN);
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (s + nparity >= dcols)
return (B_TRUE);
for (uint64_t c = 0; c < s + nparity; c++) {
uint64_t devidx = (f + c) % dcols;
vdev_t *cvd = vd->vdev_child[devidx];
/*
* dsl_scan_need_resilver() already checked vd with
* vdev_dtl_contains(). So here just check cvd with
* vdev_dtl_empty(), cheaper and a good approximation.
*/
if (!vdev_dtl_empty(cvd, DTL_PARTIAL))
return (B_TRUE);
}
return (B_FALSE);
}
static void
vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
(void) remain_rs;
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_raidz_ops);
uint64_t width = raidvd->vdev_children;
uint64_t tgt_col = cvd->vdev_id;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t b_start = logical_rs->rs_start >> ashift;
uint64_t b_end = logical_rs->rs_end >> ashift;
uint64_t start_row = 0;
if (b_start > tgt_col) /* avoid underflow */
start_row = ((b_start - tgt_col - 1) / width) + 1;
uint64_t end_row = 0;
if (b_end > tgt_col)
end_row = ((b_end - tgt_col - 1) / width) + 1;
physical_rs->rs_start = start_row << ashift;
physical_rs->rs_end = end_row << ashift;
ASSERT3U(physical_rs->rs_start, <=, logical_rs->rs_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_rs->rs_end - logical_rs->rs_start);
}
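/*
 * Worked example of the translation above (illustrative values only):
 * with width = 4 children, ashift = 9 and cvd->vdev_id = 1, the logical
 * range [0, 8192) spans parent sectors 0..15.  Child 1 holds sectors
 * 1, 5, 9 and 13, i.e. its rows 0..3, so start_row = 0,
 * end_row = ((16 - 1 - 1) / 4) + 1 = 4, and the physical range becomes
 * [0 << 9, 4 << 9) = [0, 2048).
 */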
/*
* Initialize private RAIDZ specific fields from the nvlist.
*/
static int
vdev_raidz_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
vdev_raidz_t *vdrz;
uint64_t nparity;
uint_t children;
nvlist_t **child;
int error = nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children);
if (error != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* device.
*/
if (nparity > 1 && spa_version(spa) < SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
else if (nparity > 2 && spa_version(spa) < SPA_VERSION_RAIDZ3)
return (SET_ERROR(EINVAL));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
nparity = 1;
}
vdrz = kmem_zalloc(sizeof (*vdrz), KM_SLEEP);
vdrz->vd_logical_width = children;
vdrz->vd_nparity = nparity;
*tsd = vdrz;
return (0);
}
static void
vdev_raidz_fini(vdev_t *vd)
{
kmem_free(vd->vdev_tsd, sizeof (vdev_raidz_t));
}
/*
* Add RAIDZ specific fields to the config nvlist.
*/
static void
vdev_raidz_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_raidz_ops);
vdev_raidz_t *vdrz = vd->vdev_tsd;
/*
* Make sure someone hasn't managed to sneak a fancy new vdev
* into a crufty old storage pool.
*/
ASSERT(vdrz->vd_nparity == 1 ||
(vdrz->vd_nparity <= 2 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ2) ||
(vdrz->vd_nparity <= 3 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ3));
/*
* Note that we'll add these even on storage pools where they
* aren't strictly required -- older software will just ignore
* it.
*/
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_nparity(vdev_t *vd)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
return (vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_ndisks(vdev_t *vd)
{
return (vd->vdev_children);
}
vdev_ops_t vdev_raidz_ops = {
.vdev_op_init = vdev_raidz_init,
.vdev_op_fini = vdev_raidz_fini,
.vdev_op_open = vdev_raidz_open,
.vdev_op_close = vdev_raidz_close,
.vdev_op_asize = vdev_raidz_asize,
.vdev_op_min_asize = vdev_raidz_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_raidz_io_start,
.vdev_op_io_done = vdev_raidz_io_done,
.vdev_op_state_change = vdev_raidz_state_change,
.vdev_op_need_resilver = vdev_raidz_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_raidz_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_raidz_config_generate,
.vdev_op_nparity = vdev_raidz_nparity,
.vdev_op_ndisks = vdev_raidz_ndisks,
.vdev_op_type = VDEV_TYPE_RAIDZ, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz_math_impl.h b/sys/contrib/openzfs/module/zfs/vdev_raidz_math_impl.h
index 2d96f602314c..8ba7e0cd769d 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz_math_impl.h
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz_math_impl.h
@@ -1,1502 +1,1502 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#ifndef _VDEV_RAIDZ_MATH_IMPL_H
#define _VDEV_RAIDZ_MATH_IMPL_H
#include <sys/types.h>
#include <sys/vdev_raidz_impl.h>
#define raidz_inline inline __attribute__((always_inline))
#ifndef noinline
#define noinline __attribute__((noinline))
#endif
/*
* Functions calculate multiplication constants for data reconstruction.
* Coefficients depend on RAIDZ geometry, indexes of failed child vdevs, and
* used parity columns for reconstruction.
* @rr RAIDZ row
* @tgtidx array of missing data indexes
* @coeff output array of coefficients. The array must be provided by the
* caller and must hold at least MUL_CNT values.
*/
static noinline void
raidz_rec_q_coeff(const raidz_row_t *rr, const int *tgtidx, unsigned *coeff)
{
const unsigned ncols = rr->rr_cols;
const unsigned x = tgtidx[TARGET_X];
coeff[MUL_Q_X] = gf_exp2(255 - (ncols - x - 1));
}
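/*
 * The Q syndrome accumulates column c scaled by 2^(ncols - c - 1) in
 * GF(2^8), so recovering column x means dividing by that power of two.
 * Because the multiplicative group of GF(2^8) has order 255 (2^255 = 1),
 * the inverse is 2^(255 - (ncols - x - 1)), which is the single
 * coefficient computed above.  raidz_rec_r_coeff() below is analogous,
 * using powers of four.
 */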
static noinline void
raidz_rec_r_coeff(const raidz_row_t *rr, const int *tgtidx, unsigned *coeff)
{
const unsigned ncols = rr->rr_cols;
const unsigned x = tgtidx[TARGET_X];
coeff[MUL_R_X] = gf_exp4(255 - (ncols - x - 1));
}
static noinline void
raidz_rec_pq_coeff(const raidz_row_t *rr, const int *tgtidx, unsigned *coeff)
{
const unsigned ncols = rr->rr_cols;
const unsigned x = tgtidx[TARGET_X];
const unsigned y = tgtidx[TARGET_Y];
gf_t a, b, e;
a = gf_exp2(x + 255 - y);
b = gf_exp2(255 - (ncols - x - 1));
e = a ^ 0x01;
coeff[MUL_PQ_X] = gf_div(a, e);
coeff[MUL_PQ_Y] = gf_div(b, e);
}
static noinline void
raidz_rec_pr_coeff(const raidz_row_t *rr, const int *tgtidx, unsigned *coeff)
{
const unsigned ncols = rr->rr_cols;
const unsigned x = tgtidx[TARGET_X];
const unsigned y = tgtidx[TARGET_Y];
gf_t a, b, e;
a = gf_exp4(x + 255 - y);
b = gf_exp4(255 - (ncols - x - 1));
e = a ^ 0x01;
coeff[MUL_PR_X] = gf_div(a, e);
coeff[MUL_PR_Y] = gf_div(b, e);
}
static noinline void
raidz_rec_qr_coeff(const raidz_row_t *rr, const int *tgtidx, unsigned *coeff)
{
const unsigned ncols = rr->rr_cols;
const unsigned x = tgtidx[TARGET_X];
const unsigned y = tgtidx[TARGET_Y];
gf_t nx, ny, nxxy, nxyy, d;
nx = gf_exp2(ncols - x - 1);
ny = gf_exp2(ncols - y - 1);
nxxy = gf_mul(gf_mul(nx, nx), ny);
nxyy = gf_mul(gf_mul(nx, ny), ny);
d = nxxy ^ nxyy;
coeff[MUL_QR_XQ] = ny;
coeff[MUL_QR_X] = gf_div(ny, d);
coeff[MUL_QR_YQ] = nx;
coeff[MUL_QR_Y] = gf_div(nx, d);
}
static noinline void
raidz_rec_pqr_coeff(const raidz_row_t *rr, const int *tgtidx, unsigned *coeff)
{
const unsigned ncols = rr->rr_cols;
const unsigned x = tgtidx[TARGET_X];
const unsigned y = tgtidx[TARGET_Y];
const unsigned z = tgtidx[TARGET_Z];
gf_t nx, ny, nz, nxx, nyy, nzz, nyyz, nyzz, xd, yd;
nx = gf_exp2(ncols - x - 1);
ny = gf_exp2(ncols - y - 1);
nz = gf_exp2(ncols - z - 1);
nxx = gf_exp4(ncols - x - 1);
nyy = gf_exp4(ncols - y - 1);
nzz = gf_exp4(ncols - z - 1);
nyyz = gf_mul(gf_mul(ny, nz), ny);
nyzz = gf_mul(nzz, ny);
xd = gf_mul(nxx, ny) ^ gf_mul(nx, nyy) ^ nyyz ^
gf_mul(nxx, nz) ^ gf_mul(nzz, nx) ^ nyzz;
yd = gf_inv(ny ^ nz);
coeff[MUL_PQR_XP] = gf_div(nyyz ^ nyzz, xd);
coeff[MUL_PQR_XQ] = gf_div(nyy ^ nzz, xd);
coeff[MUL_PQR_XR] = gf_div(ny ^ nz, xd);
coeff[MUL_PQR_YU] = nx;
coeff[MUL_PQR_YP] = gf_mul(nz, yd);
coeff[MUL_PQR_YQ] = yd;
}
/*
* Method for zeroing a buffer (can be implemented using SIMD).
* This method is used by multiple gen/rec functions.
*
* @dc Destination buffer
* @dsize Destination buffer size
* @private Unused
*/
static int
raidz_zero_abd_cb(void *dc, size_t dsize, void *private)
{
v_t *dst = (v_t *)dc;
size_t i;
ZERO_DEFINE();
(void) private; /* unused */
ZERO(ZERO_D);
for (i = 0; i < dsize / sizeof (v_t); i += (2 * ZERO_STRIDE)) {
STORE(dst + i, ZERO_D);
STORE(dst + i + ZERO_STRIDE, ZERO_D);
}
return (0);
}
#define raidz_zero(dabd, size) \
{ \
abd_iterate_func(dabd, 0, size, raidz_zero_abd_cb, NULL); \
}
/*
* Method for copying one buffer to another (can be implemented using SIMD).
* This method is used by multiple gen/rec functions.
*
* @dc Destination buffer
* @sc Source buffer
* @size Size of both buffers
* @private Unused
*/
static int
raidz_copy_abd_cb(void *dc, void *sc, size_t size, void *private)
{
v_t *dst = (v_t *)dc;
const v_t *src = (v_t *)sc;
size_t i;
COPY_DEFINE();
(void) private; /* unused */
for (i = 0; i < size / sizeof (v_t); i += (2 * COPY_STRIDE)) {
LOAD(src + i, COPY_D);
STORE(dst + i, COPY_D);
LOAD(src + i + COPY_STRIDE, COPY_D);
STORE(dst + i + COPY_STRIDE, COPY_D);
}
return (0);
}
#define raidz_copy(dabd, sabd, size) \
{ \
abd_iterate_func2(dabd, sabd, 0, 0, size, raidz_copy_abd_cb, NULL);\
}
/*
* Method for adding (XORing) two buffers.
* Source and destination are XORed together and the result is stored in the
* destination buffer. This method is used by multiple gen/rec functions.
*
* @dc Destination buffer
* @sc Source buffer
* @size Size of both buffers
* @private Unused
*/
static int
raidz_add_abd_cb(void *dc, void *sc, size_t size, void *private)
{
v_t *dst = (v_t *)dc;
const v_t *src = (v_t *)sc;
size_t i;
ADD_DEFINE();
(void) private; /* unused */
for (i = 0; i < size / sizeof (v_t); i += (2 * ADD_STRIDE)) {
LOAD(dst + i, ADD_D);
XOR_ACC(src + i, ADD_D);
STORE(dst + i, ADD_D);
LOAD(dst + i + ADD_STRIDE, ADD_D);
XOR_ACC(src + i + ADD_STRIDE, ADD_D);
STORE(dst + i + ADD_STRIDE, ADD_D);
}
return (0);
}
#define raidz_add(dabd, sabd, size) \
{ \
abd_iterate_func2(dabd, sabd, 0, 0, size, raidz_add_abd_cb, NULL);\
}
/*
* Method for multiplying a buffer with a constant in GF(2^8).
* Symbols from the buffer are multiplied by a constant and the result is
* stored back in the same buffer.
*
* @dc In/Out data buffer.
* @size Size of the buffer
* @private pointer to the multiplication constant (unsigned)
*/
static int
raidz_mul_abd_cb(void *dc, size_t size, void *private)
{
const unsigned mul = *((unsigned *)private);
v_t *d = (v_t *)dc;
size_t i;
MUL_DEFINE();
for (i = 0; i < size / sizeof (v_t); i += (2 * MUL_STRIDE)) {
LOAD(d + i, MUL_D);
MUL(mul, MUL_D);
STORE(d + i, MUL_D);
LOAD(d + i + MUL_STRIDE, MUL_D);
MUL(mul, MUL_D);
STORE(d + i + MUL_STRIDE, MUL_D);
}
return (0);
}
/*
* Syndrome generation/update macros
*
* Require LOAD(), XOR(), STORE(), MUL2(), and MUL4() macros
*/
#define P_D_SYNDROME(D, T, t) \
{ \
LOAD((t), T); \
XOR(D, T); \
STORE((t), T); \
}
#define Q_D_SYNDROME(D, T, t) \
{ \
LOAD((t), T); \
MUL2(T); \
XOR(D, T); \
STORE((t), T); \
}
#define Q_SYNDROME(T, t) \
{ \
LOAD((t), T); \
MUL2(T); \
STORE((t), T); \
}
#define R_D_SYNDROME(D, T, t) \
{ \
LOAD((t), T); \
MUL4(T); \
XOR(D, T); \
STORE((t), T); \
}
#define R_SYNDROME(T, t) \
{ \
LOAD((t), T); \
MUL4(T); \
STORE((t), T); \
}
/*
* PARITY CALCULATION
*
* Macros *_SYNDROME are used for parity/syndrome calculation.
* *_D_SYNDROME() macros are used to calculate syndrome between 0 and
* length of data column, and *_SYNDROME() macros are only for updating
* the parity/syndrome if data column is shorter.
*
* P parity is calculated using raidz_add_abd().
*/
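/*
 * In plain scalar terms (illustrative only, ignoring SIMD strides), the
 * per-byte updates performed by the macros above amount to:
 *
 *    p[i] ^= d[i];                      (P_D_SYNDROME)
 *    q[i] = gf_mul2(q[i]) ^ d[i];       (Q_D_SYNDROME)
 *    q[i] = gf_mul2(q[i]);              (Q_SYNDROME, past a short column)
 *    r[i] = gf_mul4(r[i]) ^ d[i];       (R_D_SYNDROME)
 *    r[i] = gf_mul4(r[i]);              (R_SYNDROME, past a short column)
 *
 * where gf_mul2() and gf_mul4() stand for multiplication by 2 and 4 in
 * GF(2^8); the real implementations use the MUL2() and MUL4() SIMD macros.
 */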
/*
* Generate P parity (RAIDZ1)
*
* @rr RAIDZ row
*/
static raidz_inline void
raidz_generate_p_impl(raidz_row_t * const rr)
{
size_t c;
const size_t ncols = rr->rr_cols;
const size_t psize = rr->rr_col[CODE_P].rc_size;
abd_t *pabd = rr->rr_col[CODE_P].rc_abd;
size_t size;
abd_t *dabd;
raidz_math_begin();
/* start with first data column */
raidz_copy(pabd, rr->rr_col[1].rc_abd, psize);
for (c = 2; c < ncols; c++) {
dabd = rr->rr_col[c].rc_abd;
size = rr->rr_col[c].rc_size;
/* add data column */
raidz_add(pabd, dabd, size);
}
raidz_math_end();
}
/*
* Generate PQ parity (RAIDZ2)
* The function is called per data column.
*
* @c array of pointers to parity (code) columns
* @dc pointer to data column
* @csize size of parity columns
* @dsize size of data column
*/
static void
raidz_gen_pq_add(void **c, const void *dc, const size_t csize,
const size_t dsize)
{
v_t *p = (v_t *)c[0];
v_t *q = (v_t *)c[1];
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const qend = q + (csize / sizeof (v_t));
GEN_PQ_DEFINE();
MUL2_SETUP();
for (; d < dend; d += GEN_PQ_STRIDE, p += GEN_PQ_STRIDE,
q += GEN_PQ_STRIDE) {
LOAD(d, GEN_PQ_D);
P_D_SYNDROME(GEN_PQ_D, GEN_PQ_C, p);
Q_D_SYNDROME(GEN_PQ_D, GEN_PQ_C, q);
}
for (; q < qend; q += GEN_PQ_STRIDE) {
Q_SYNDROME(GEN_PQ_C, q);
}
}
/*
* Generate PQ parity (RAIDZ2)
*
* @rr RAIDZ row
*/
static raidz_inline void
raidz_generate_pq_impl(raidz_row_t * const rr)
{
size_t c;
const size_t ncols = rr->rr_cols;
const size_t csize = rr->rr_col[CODE_P].rc_size;
size_t dsize;
abd_t *dabd;
abd_t *cabds[] = {
rr->rr_col[CODE_P].rc_abd,
rr->rr_col[CODE_Q].rc_abd
};
raidz_math_begin();
raidz_copy(cabds[CODE_P], rr->rr_col[2].rc_abd, csize);
raidz_copy(cabds[CODE_Q], rr->rr_col[2].rc_abd, csize);
for (c = 3; c < ncols; c++) {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
abd_raidz_gen_iterate(cabds, dabd, csize, dsize, 2,
raidz_gen_pq_add);
}
raidz_math_end();
}
/*
* Generate PQR parity (RAIDZ3)
* The function is called per data column.
*
* @c array of pointers to parity (code) columns
* @dc pointer to data column
* @csize size of parity columns
* @dsize size of data column
*/
static void
raidz_gen_pqr_add(void **c, const void *dc, const size_t csize,
const size_t dsize)
{
- v_t *p = (v_t *)c[0];
- v_t *q = (v_t *)c[1];
+ v_t *p = (v_t *)c[CODE_P];
+ v_t *q = (v_t *)c[CODE_Q];
v_t *r = (v_t *)c[CODE_R];
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const qend = q + (csize / sizeof (v_t));
GEN_PQR_DEFINE();
MUL2_SETUP();
for (; d < dend; d += GEN_PQR_STRIDE, p += GEN_PQR_STRIDE,
q += GEN_PQR_STRIDE, r += GEN_PQR_STRIDE) {
LOAD(d, GEN_PQR_D);
P_D_SYNDROME(GEN_PQR_D, GEN_PQR_C, p);
Q_D_SYNDROME(GEN_PQR_D, GEN_PQR_C, q);
R_D_SYNDROME(GEN_PQR_D, GEN_PQR_C, r);
}
for (; q < qend; q += GEN_PQR_STRIDE, r += GEN_PQR_STRIDE) {
Q_SYNDROME(GEN_PQR_C, q);
R_SYNDROME(GEN_PQR_C, r);
}
}
/*
- * Generate PQR parity (RAIDZ2)
+ * Generate PQR parity (RAIDZ3)
*
* @rr RAIDZ row
*/
static raidz_inline void
raidz_generate_pqr_impl(raidz_row_t * const rr)
{
size_t c;
const size_t ncols = rr->rr_cols;
const size_t csize = rr->rr_col[CODE_P].rc_size;
size_t dsize;
abd_t *dabd;
abd_t *cabds[] = {
rr->rr_col[CODE_P].rc_abd,
rr->rr_col[CODE_Q].rc_abd,
rr->rr_col[CODE_R].rc_abd
};
raidz_math_begin();
raidz_copy(cabds[CODE_P], rr->rr_col[3].rc_abd, csize);
raidz_copy(cabds[CODE_Q], rr->rr_col[3].rc_abd, csize);
raidz_copy(cabds[CODE_R], rr->rr_col[3].rc_abd, csize);
for (c = 4; c < ncols; c++) {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
abd_raidz_gen_iterate(cabds, dabd, csize, dsize, 3,
raidz_gen_pqr_add);
}
raidz_math_end();
}
/*
* DATA RECONSTRUCTION
*
* Data reconstruction process consists of two phases:
* - Syndrome calculation
* - Data reconstruction
*
* The syndrome is calculated by generating parity using the available data
* columns and zeros in place of the erasures. The existing parity is added to
* the corresponding syndrome value to obtain the [P|Q|R]syn values from the
* equations:
* P = Psyn + Dx + Dy + Dz
* Q = Qsyn + 2^x * Dx + 2^y * Dy + 2^z * Dz
* R = Rsyn + 4^x * Dx + 4^y * Dy + 4^z * Dz
*
* For data reconstruction phase, the corresponding equations are solved
* for missing data (Dx, Dy, Dz). This generally involves multiplying known
* symbols by a coefficient and adding them together. The multiplication
* constant coefficients are calculated ahead of the operation in the
* raidz_rec_[q|r|pq|pr|qr|pqr]_coeff() functions.
*
* IMPLEMENTATION NOTE: RAID-Z block can have complex geometry, with "big"
* and "short" columns.
* For this reason, reconstruction is performed in a minimum of
* two steps: first from offset 0 to short_size, then from short_size to
* big_size. Calculation functions REC_[*]_BLOCK() are implemented to work
* over both ranges. The split also enables removal of conditional expressions
* from loop bodies, improving throughput of SIMD implementations.
* For the best performance, all functions marked with raidz_inline attribute
* must be inlined by compiler.
*
* parity data
* columns columns
* <----------> <------------------>
* x y <----+ missing columns (x, y)
* | |
* +---+---+---+---+-v-+---+-v-+---+ ^ 0
* | | | | | | | | | |
* | | | | | | | | | |
* | P | Q | R | D | D | D | D | D | |
* | | | | 0 | 1 | 2 | 3 | 4 | |
* | | | | | | | | | v
* | | | | | +---+---+---+ ^ short_size
* | | | | | | |
* +---+---+---+---+---+ v big_size
* <------------------> <---------->
* big columns short columns
*
*/
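/*
 * For example, with a single missing data column Dx and Q parity intact,
 * the syndrome is generated with zeros substituted for Dx, so
 * Q = Qsyn + 2^x * Dx (addition being XOR in GF(2^8)) and therefore:
 *
 *    Dx = (Q + Qsyn) * (2^x)^-1
 *
 * This is exactly what raidz_reconstruct_q_impl() below does: accumulate
 * the syndrome, add (XOR) the Q column, then multiply by the inverse
 * coefficient precomputed in raidz_rec_q_coeff().  The two- and
 * three-column cases solve the corresponding 2x2 and 3x3 systems with
 * the coefficients from the other raidz_rec_*_coeff() functions.
 */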
/*
* Reconstruct single data column using P parity
*
* @syn_method raidz_add_abd()
* @rec_method not applicable
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_p_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[TARGET_X];
const size_t xsize = rr->rr_col[x].rc_size;
abd_t *xabd = rr->rr_col[x].rc_abd;
size_t size;
abd_t *dabd;
if (xabd == NULL)
return (1 << CODE_P);
raidz_math_begin();
/* copy P into target */
raidz_copy(xabd, rr->rr_col[CODE_P].rc_abd, xsize);
/* generate p_syndrome */
for (c = firstdc; c < ncols; c++) {
if (c == x)
continue;
dabd = rr->rr_col[c].rc_abd;
size = MIN(rr->rr_col[c].rc_size, xsize);
raidz_add(xabd, dabd, size);
}
raidz_math_end();
return (1 << CODE_P);
}
/*
* Generate Q syndrome (Qsyn)
*
* @xc array of pointers to syndrome columns
* @dc data column (NULL if missing)
* @xsize size of syndrome columns
* @dsize size of data column (0 if missing)
*/
static void
raidz_syn_q_abd(void **xc, const void *dc, const size_t xsize,
const size_t dsize)
{
v_t *x = (v_t *)xc[TARGET_X];
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const xend = x + (xsize / sizeof (v_t));
SYN_Q_DEFINE();
MUL2_SETUP();
for (; d < dend; d += SYN_STRIDE, x += SYN_STRIDE) {
LOAD(d, SYN_Q_D);
Q_D_SYNDROME(SYN_Q_D, SYN_Q_X, x);
}
for (; x < xend; x += SYN_STRIDE) {
Q_SYNDROME(SYN_Q_X, x);
}
}
/*
* Reconstruct single data column using Q parity
*
* @syn_method raidz_add_abd()
* @rec_method raidz_mul_abd_cb()
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_q_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
size_t dsize;
abd_t *dabd;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[TARGET_X];
abd_t *xabd = rr->rr_col[x].rc_abd;
const size_t xsize = rr->rr_col[x].rc_size;
abd_t *tabds[] = { xabd };
if (xabd == NULL)
return (1 << CODE_Q);
unsigned coeff[MUL_CNT];
raidz_rec_q_coeff(rr, tgtidx, coeff);
raidz_math_begin();
/* Start with first data column if present */
if (firstdc != x) {
raidz_copy(xabd, rr->rr_col[firstdc].rc_abd, xsize);
} else {
raidz_zero(xabd, xsize);
}
/* generate q_syndrome */
for (c = firstdc+1; c < ncols; c++) {
if (c == x) {
dabd = NULL;
dsize = 0;
} else {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 1,
raidz_syn_q_abd);
}
/* add Q to the syndrome */
raidz_add(xabd, rr->rr_col[CODE_Q].rc_abd, xsize);
/* transform the syndrome */
abd_iterate_func(xabd, 0, xsize, raidz_mul_abd_cb, (void*) coeff);
raidz_math_end();
return (1 << CODE_Q);
}
/*
* Generate R syndrome (Rsyn)
*
* @xc array of pointers to syndrome columns
* @dc data column (NULL if missing)
* @tsize size of syndrome columns
* @dsize size of data column (0 if missing)
*/
static void
raidz_syn_r_abd(void **xc, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *)xc[TARGET_X];
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const xend = x + (tsize / sizeof (v_t));
SYN_R_DEFINE();
MUL2_SETUP();
for (; d < dend; d += SYN_STRIDE, x += SYN_STRIDE) {
LOAD(d, SYN_R_D);
R_D_SYNDROME(SYN_R_D, SYN_R_X, x);
}
for (; x < xend; x += SYN_STRIDE) {
R_SYNDROME(SYN_R_X, x);
}
}
/*
* Reconstruct single data column using R parity
*
* @syn_method raidz_add_abd()
* @rec_method raidz_mul_abd_cb()
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_r_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
size_t dsize;
abd_t *dabd;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[TARGET_X];
const size_t xsize = rr->rr_col[x].rc_size;
abd_t *xabd = rr->rr_col[x].rc_abd;
abd_t *tabds[] = { xabd };
if (xabd == NULL)
return (1 << CODE_R);
unsigned coeff[MUL_CNT];
raidz_rec_r_coeff(rr, tgtidx, coeff);
raidz_math_begin();
/* Start with first data column if present */
if (firstdc != x) {
raidz_copy(xabd, rr->rr_col[firstdc].rc_abd, xsize);
} else {
raidz_zero(xabd, xsize);
}
/* generate r_syndrome */
for (c = firstdc+1; c < ncols; c++) {
if (c == x) {
dabd = NULL;
dsize = 0;
} else {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 1,
raidz_syn_r_abd);
}
/* add R to the syndrome */
raidz_add(xabd, rr->rr_col[CODE_R].rc_abd, xsize);
/* transform the syndrome */
abd_iterate_func(xabd, 0, xsize, raidz_mul_abd_cb, (void *)coeff);
raidz_math_end();
return (1 << CODE_R);
}
/*
* Generate P and Q syndromes
*
* @xc array of pointers to syndrome columns
* @dc data column (NULL if missing)
* @tsize size of syndrome columns
* @dsize size of data column (0 if missing)
*/
static void
raidz_syn_pq_abd(void **tc, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *)tc[TARGET_X];
v_t *y = (v_t *)tc[TARGET_Y];
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const yend = y + (tsize / sizeof (v_t));
SYN_PQ_DEFINE();
MUL2_SETUP();
for (; d < dend; d += SYN_STRIDE, x += SYN_STRIDE, y += SYN_STRIDE) {
LOAD(d, SYN_PQ_D);
P_D_SYNDROME(SYN_PQ_D, SYN_PQ_X, x);
Q_D_SYNDROME(SYN_PQ_D, SYN_PQ_X, y);
}
for (; y < yend; y += SYN_STRIDE) {
Q_SYNDROME(SYN_PQ_X, y);
}
}
/*
* Reconstruct data using PQ parity and PQ syndromes
*
* @tc syndrome/result columns
* @tsize size of syndrome/result columns
* @c parity columns
* @mul array of multiplication constants
*/
static void
raidz_rec_pq_abd(void **tc, const size_t tsize, void **c,
const unsigned *mul)
{
v_t *x = (v_t *)tc[TARGET_X];
v_t *y = (v_t *)tc[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
REC_PQ_DEFINE();
for (; x < xend; x += REC_PQ_STRIDE, y += REC_PQ_STRIDE,
p += REC_PQ_STRIDE, q += REC_PQ_STRIDE) {
LOAD(x, REC_PQ_X);
LOAD(y, REC_PQ_Y);
XOR_ACC(p, REC_PQ_X);
XOR_ACC(q, REC_PQ_Y);
/* Save Pxy */
COPY(REC_PQ_X, REC_PQ_T);
/* Calc X */
MUL(mul[MUL_PQ_X], REC_PQ_X);
MUL(mul[MUL_PQ_Y], REC_PQ_Y);
XOR(REC_PQ_Y, REC_PQ_X);
STORE(x, REC_PQ_X);
/* Calc Y */
XOR(REC_PQ_T, REC_PQ_X);
STORE(y, REC_PQ_X);
}
}
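/*
 * The loop above solves, per byte position, the 2x2 GF(2^8) system for
 * the two missing columns: after the XOR_ACC() steps, REC_PQ_X holds
 * Pxy = Dx + Dy and REC_PQ_Y holds Qxy = gx * Dx + gy * Dy, where gx and
 * gy are the per-column powers of two used by the Q syndrome.  With the
 * coefficients from raidz_rec_pq_coeff():
 *
 *    Dx = mul[MUL_PQ_X] * Pxy + mul[MUL_PQ_Y] * Qxy
 *    Dy = Pxy + Dx
 *
 * which is why Pxy is saved in REC_PQ_T before REC_PQ_X is overwritten.
 */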
/*
* Reconstruct two data columns using PQ parity
*
* @syn_method raidz_syn_pq_abd()
* @rec_method raidz_rec_pq_abd()
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_pq_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
size_t dsize;
abd_t *dabd;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[TARGET_X];
const size_t y = tgtidx[TARGET_Y];
const size_t xsize = rr->rr_col[x].rc_size;
const size_t ysize = rr->rr_col[y].rc_size;
abd_t *xabd = rr->rr_col[x].rc_abd;
abd_t *yabd = rr->rr_col[y].rc_abd;
abd_t *tabds[2] = { xabd, yabd };
abd_t *cabds[] = {
rr->rr_col[CODE_P].rc_abd,
rr->rr_col[CODE_Q].rc_abd
};
if (xabd == NULL)
return ((1 << CODE_P) | (1 << CODE_Q));
unsigned coeff[MUL_CNT];
raidz_rec_pq_coeff(rr, tgtidx, coeff);
/*
* Check if one of the targets is shorter than the other.
* In this case, the shorter target needs to be replaced with a
* new buffer so that the syndrome can be calculated on the full length.
*/
if (ysize < xsize) {
yabd = abd_alloc(xsize, B_FALSE);
tabds[1] = yabd;
}
raidz_math_begin();
/* Start with first data column if present */
if (firstdc != x) {
raidz_copy(xabd, rr->rr_col[firstdc].rc_abd, xsize);
raidz_copy(yabd, rr->rr_col[firstdc].rc_abd, xsize);
} else {
raidz_zero(xabd, xsize);
raidz_zero(yabd, xsize);
}
/* generate p and q syndromes */
for (c = firstdc+1; c < ncols; c++) {
if (c == x || c == y) {
dabd = NULL;
dsize = 0;
} else {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
raidz_syn_pq_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_pq_abd, coeff);
/* Copy shorter targets back to the original abd buffer */
if (ysize < xsize)
raidz_copy(rr->rr_col[y].rc_abd, yabd, ysize);
raidz_math_end();
if (ysize < xsize)
abd_free(yabd);
return ((1 << CODE_P) | (1 << CODE_Q));
}
/*
* Generate P and R syndromes
*
* @xc array of pointers to syndrome columns
* @dc data column (NULL if missing)
* @tsize size of syndrome columns
* @dsize size of data column (0 if missing)
*/
static void
raidz_syn_pr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const yend = y + (tsize / sizeof (v_t));
SYN_PR_DEFINE();
MUL2_SETUP();
for (; d < dend; d += SYN_STRIDE, x += SYN_STRIDE, y += SYN_STRIDE) {
LOAD(d, SYN_PR_D);
P_D_SYNDROME(SYN_PR_D, SYN_PR_X, x);
R_D_SYNDROME(SYN_PR_D, SYN_PR_X, y);
}
for (; y < yend; y += SYN_STRIDE) {
R_SYNDROME(SYN_PR_X, y);
}
}
/*
* Reconstruct data using PR parity and PR syndromes
*
* @tc syndrome/result columns
* @tsize size of syndrome/result columns
* @c parity columns
* @mul array of multiplication constants
*/
static void
raidz_rec_pr_abd(void **t, const size_t tsize, void **c,
const unsigned *mul)
{
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
REC_PR_DEFINE();
for (; x < xend; x += REC_PR_STRIDE, y += REC_PR_STRIDE,
p += REC_PR_STRIDE, q += REC_PR_STRIDE) {
LOAD(x, REC_PR_X);
LOAD(y, REC_PR_Y);
XOR_ACC(p, REC_PR_X);
XOR_ACC(q, REC_PR_Y);
/* Save Pxy */
COPY(REC_PR_X, REC_PR_T);
/* Calc X */
MUL(mul[MUL_PR_X], REC_PR_X);
MUL(mul[MUL_PR_Y], REC_PR_Y);
XOR(REC_PR_Y, REC_PR_X);
STORE(x, REC_PR_X);
/* Calc Y */
XOR(REC_PR_T, REC_PR_X);
STORE(y, REC_PR_X);
}
}
/*
* Reconstruct two data columns using PR parity
*
* @syn_method raidz_syn_pr_abd()
* @rec_method raidz_rec_pr_abd()
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_pr_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
size_t dsize;
abd_t *dabd;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[0];
const size_t y = tgtidx[1];
const size_t xsize = rr->rr_col[x].rc_size;
const size_t ysize = rr->rr_col[y].rc_size;
abd_t *xabd = rr->rr_col[x].rc_abd;
abd_t *yabd = rr->rr_col[y].rc_abd;
abd_t *tabds[2] = { xabd, yabd };
abd_t *cabds[] = {
rr->rr_col[CODE_P].rc_abd,
rr->rr_col[CODE_R].rc_abd
};
if (xabd == NULL)
return ((1 << CODE_P) | (1 << CODE_R));
unsigned coeff[MUL_CNT];
raidz_rec_pr_coeff(rr, tgtidx, coeff);
/*
* Check if some of the targets are shorter than the others.
* They need to be replaced with a new buffer so that the syndrome can
* be calculated on the full length.
*/
if (ysize < xsize) {
yabd = abd_alloc(xsize, B_FALSE);
tabds[1] = yabd;
}
raidz_math_begin();
/* Start with first data column if present */
if (firstdc != x) {
raidz_copy(xabd, rr->rr_col[firstdc].rc_abd, xsize);
raidz_copy(yabd, rr->rr_col[firstdc].rc_abd, xsize);
} else {
raidz_zero(xabd, xsize);
raidz_zero(yabd, xsize);
}
/* generate p and r syndromes */
for (c = firstdc+1; c < ncols; c++) {
if (c == x || c == y) {
dabd = NULL;
dsize = 0;
} else {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
raidz_syn_pr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_pr_abd, coeff);
/*
* Copy shorter targets back to the original abd buffer
*/
if (ysize < xsize)
raidz_copy(rr->rr_col[y].rc_abd, yabd, ysize);
raidz_math_end();
if (ysize < xsize)
abd_free(yabd);
return ((1 << CODE_P) | (1 << CODE_R));
}
/*
* Generate Q and R syndromes
*
* @xc array of pointers to syndrome columns
* @dc data column (NULL if missing)
* @tsize size of syndrome columns
* @dsize size of data column (0 if missing)
*/
static void
raidz_syn_qr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
SYN_QR_DEFINE();
MUL2_SETUP();
for (; d < dend; d += SYN_STRIDE, x += SYN_STRIDE, y += SYN_STRIDE) {
LOAD(d, SYN_PQ_D);
Q_D_SYNDROME(SYN_QR_D, SYN_QR_X, x);
R_D_SYNDROME(SYN_QR_D, SYN_QR_X, y);
}
for (; x < xend; x += SYN_STRIDE, y += SYN_STRIDE) {
Q_SYNDROME(SYN_QR_X, x);
R_SYNDROME(SYN_QR_X, y);
}
}
/*
* Reconstruct data using QR parity and QR syndromes
*
* @tc syndrome/result columns
* @tsize size of syndrome/result columns
* @c parity columns
* @mul array of multiplication constants
*/
static void
raidz_rec_qr_abd(void **t, const size_t tsize, void **c,
const unsigned *mul)
{
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
REC_QR_DEFINE();
for (; x < xend; x += REC_QR_STRIDE, y += REC_QR_STRIDE,
p += REC_QR_STRIDE, q += REC_QR_STRIDE) {
LOAD(x, REC_QR_X);
LOAD(y, REC_QR_Y);
XOR_ACC(p, REC_QR_X);
XOR_ACC(q, REC_QR_Y);
/* Save Pxy */
COPY(REC_QR_X, REC_QR_T);
/* Calc X */
MUL(mul[MUL_QR_XQ], REC_QR_X); /* X = Q * xqm */
XOR(REC_QR_Y, REC_QR_X); /* X = R ^ X */
MUL(mul[MUL_QR_X], REC_QR_X); /* X = X * xm */
STORE(x, REC_QR_X);
/* Calc Y */
MUL(mul[MUL_QR_YQ], REC_QR_T); /* X = Q * xqm */
XOR(REC_QR_Y, REC_QR_T); /* X = R ^ X */
MUL(mul[MUL_QR_Y], REC_QR_T); /* X = X * xm */
STORE(y, REC_QR_T);
}
}
/*
* Reconstruct two data columns using QR parity
*
* @syn_method raidz_syn_qr_abd()
* @rec_method raidz_rec_qr_abd()
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_qr_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
size_t dsize;
abd_t *dabd;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[TARGET_X];
const size_t y = tgtidx[TARGET_Y];
const size_t xsize = rr->rr_col[x].rc_size;
const size_t ysize = rr->rr_col[y].rc_size;
abd_t *xabd = rr->rr_col[x].rc_abd;
abd_t *yabd = rr->rr_col[y].rc_abd;
abd_t *tabds[2] = { xabd, yabd };
abd_t *cabds[] = {
rr->rr_col[CODE_Q].rc_abd,
rr->rr_col[CODE_R].rc_abd
};
if (xabd == NULL)
return ((1 << CODE_Q) | (1 << CODE_R));
unsigned coeff[MUL_CNT];
raidz_rec_qr_coeff(rr, tgtidx, coeff);
/*
* Check if one of the targets is shorter than the other.
* In this case, the shorter target needs to be replaced with a
* new buffer so that the syndrome can be calculated on the full length.
*/
if (ysize < xsize) {
yabd = abd_alloc(xsize, B_FALSE);
tabds[1] = yabd;
}
raidz_math_begin();
/* Start with first data column if present */
if (firstdc != x) {
raidz_copy(xabd, rr->rr_col[firstdc].rc_abd, xsize);
raidz_copy(yabd, rr->rr_col[firstdc].rc_abd, xsize);
} else {
raidz_zero(xabd, xsize);
raidz_zero(yabd, xsize);
}
/* generate q and r syndromes */
for (c = firstdc+1; c < ncols; c++) {
if (c == x || c == y) {
dabd = NULL;
dsize = 0;
} else {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
raidz_syn_qr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_qr_abd, coeff);
/*
* Copy shorter targets back to the original abd buffer
*/
if (ysize < xsize)
raidz_copy(rr->rr_col[y].rc_abd, yabd, ysize);
raidz_math_end();
if (ysize < xsize)
abd_free(yabd);
return ((1 << CODE_Q) | (1 << CODE_R));
}
/*
* Generate P, Q, and R syndromes
*
* @xc array of pointers to syndrome columns
* @dc data column (NULL if missing)
* @tsize size of syndrome columns
* @dsize size of data column (0 if missing)
*/
static void
raidz_syn_pqr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
v_t *x = (v_t *)c[TARGET_X];
v_t *y = (v_t *)c[TARGET_Y];
v_t *z = (v_t *)c[TARGET_Z];
const v_t * const yend = y + (tsize / sizeof (v_t));
const v_t *d = (const v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
SYN_PQR_DEFINE();
MUL2_SETUP();
for (; d < dend; d += SYN_STRIDE, x += SYN_STRIDE, y += SYN_STRIDE,
z += SYN_STRIDE) {
LOAD(d, SYN_PQR_D);
P_D_SYNDROME(SYN_PQR_D, SYN_PQR_X, x)
Q_D_SYNDROME(SYN_PQR_D, SYN_PQR_X, y);
R_D_SYNDROME(SYN_PQR_D, SYN_PQR_X, z);
}
for (; y < yend; y += SYN_STRIDE, z += SYN_STRIDE) {
Q_SYNDROME(SYN_PQR_X, y);
R_SYNDROME(SYN_PQR_X, z);
}
}
/*
* Reconstruct data using PQR parity and PQR syndromes
*
* @tc syndrome/result columns
* @tsize size of syndrome/result columns
* @c parity columns
* @mul array of multiplication constants
*/
static void
raidz_rec_pqr_abd(void **t, const size_t tsize, void **c,
const unsigned * const mul)
{
v_t *x = (v_t *)t[TARGET_X];
v_t *y = (v_t *)t[TARGET_Y];
v_t *z = (v_t *)t[TARGET_Z];
const v_t * const xend = x + (tsize / sizeof (v_t));
const v_t *p = (v_t *)c[CODE_P];
const v_t *q = (v_t *)c[CODE_Q];
const v_t *r = (v_t *)c[CODE_R];
REC_PQR_DEFINE();
for (; x < xend; x += REC_PQR_STRIDE, y += REC_PQR_STRIDE,
z += REC_PQR_STRIDE, p += REC_PQR_STRIDE, q += REC_PQR_STRIDE,
r += REC_PQR_STRIDE) {
LOAD(x, REC_PQR_X);
LOAD(y, REC_PQR_Y);
LOAD(z, REC_PQR_Z);
XOR_ACC(p, REC_PQR_X);
XOR_ACC(q, REC_PQR_Y);
XOR_ACC(r, REC_PQR_Z);
/* Save Pxyz and Qxyz */
COPY(REC_PQR_X, REC_PQR_XS);
COPY(REC_PQR_Y, REC_PQR_YS);
/* Calc X */
MUL(mul[MUL_PQR_XP], REC_PQR_X); /* Xp = Pxyz * xp */
MUL(mul[MUL_PQR_XQ], REC_PQR_Y); /* Xq = Qxyz * xq */
XOR(REC_PQR_Y, REC_PQR_X);
MUL(mul[MUL_PQR_XR], REC_PQR_Z); /* Xr = Rxyz * xr */
XOR(REC_PQR_Z, REC_PQR_X); /* X = Xp + Xq + Xr */
STORE(x, REC_PQR_X);
/* Calc Y */
XOR(REC_PQR_X, REC_PQR_XS); /* Pyz = Pxyz + X */
MUL(mul[MUL_PQR_YU], REC_PQR_X); /* Xq = X * upd_q */
XOR(REC_PQR_X, REC_PQR_YS); /* Qyz = Qxyz + Xq */
COPY(REC_PQR_XS, REC_PQR_X); /* restore Pyz */
MUL(mul[MUL_PQR_YP], REC_PQR_X); /* Yp = Pyz * yp */
MUL(mul[MUL_PQR_YQ], REC_PQR_YS); /* Yq = Qyz * yq */
XOR(REC_PQR_X, REC_PQR_YS); /* Y = Yp + Yq */
STORE(y, REC_PQR_YS);
/* Calc Z */
XOR(REC_PQR_XS, REC_PQR_YS); /* Z = Pz = Pyz + Y */
STORE(z, REC_PQR_YS);
}
}
/*
* Reconstruct three data columns using PQR parity
*
* @syn_method raidz_syn_pqr_abd()
* @rec_method raidz_rec_pqr_abd()
*
* @rr RAIDZ row
* @tgtidx array of missing data indexes
*/
static raidz_inline int
raidz_reconstruct_pqr_impl(raidz_row_t *rr, const int *tgtidx)
{
size_t c;
size_t dsize;
abd_t *dabd;
const size_t firstdc = rr->rr_firstdatacol;
const size_t ncols = rr->rr_cols;
const size_t x = tgtidx[TARGET_X];
const size_t y = tgtidx[TARGET_Y];
const size_t z = tgtidx[TARGET_Z];
const size_t xsize = rr->rr_col[x].rc_size;
const size_t ysize = rr->rr_col[y].rc_size;
const size_t zsize = rr->rr_col[z].rc_size;
abd_t *xabd = rr->rr_col[x].rc_abd;
abd_t *yabd = rr->rr_col[y].rc_abd;
abd_t *zabd = rr->rr_col[z].rc_abd;
abd_t *tabds[] = { xabd, yabd, zabd };
abd_t *cabds[] = {
rr->rr_col[CODE_P].rc_abd,
rr->rr_col[CODE_Q].rc_abd,
rr->rr_col[CODE_R].rc_abd
};
if (xabd == NULL)
return ((1 << CODE_P) | (1 << CODE_Q) | (1 << CODE_R));
unsigned coeff[MUL_CNT];
raidz_rec_pqr_coeff(rr, tgtidx, coeff);
/*
* Check if any of the targets are shorter than the others.
* In this case, the shorter targets need to be replaced with
* new buffers so that the syndromes can be calculated on the full length.
*/
if (ysize < xsize) {
yabd = abd_alloc(xsize, B_FALSE);
tabds[1] = yabd;
}
if (zsize < xsize) {
zabd = abd_alloc(xsize, B_FALSE);
tabds[2] = zabd;
}
raidz_math_begin();
/* Start with first data column if present */
if (firstdc != x) {
raidz_copy(xabd, rr->rr_col[firstdc].rc_abd, xsize);
raidz_copy(yabd, rr->rr_col[firstdc].rc_abd, xsize);
raidz_copy(zabd, rr->rr_col[firstdc].rc_abd, xsize);
} else {
raidz_zero(xabd, xsize);
raidz_zero(yabd, xsize);
raidz_zero(zabd, xsize);
}
/* generate p, q and r syndromes */
for (c = firstdc+1; c < ncols; c++) {
if (c == x || c == y || c == z) {
dabd = NULL;
dsize = 0;
} else {
dabd = rr->rr_col[c].rc_abd;
dsize = rr->rr_col[c].rc_size;
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 3,
raidz_syn_pqr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 3, raidz_rec_pqr_abd, coeff);
/*
* Copy shorter targets back to the original abd buffer
*/
if (ysize < xsize)
raidz_copy(rr->rr_col[y].rc_abd, yabd, ysize);
if (zsize < xsize)
raidz_copy(rr->rr_col[z].rc_abd, zabd, zsize);
raidz_math_end();
if (ysize < xsize)
abd_free(yabd);
if (zsize < xsize)
abd_free(zabd);
return ((1 << CODE_P) | (1 << CODE_Q) | (1 << CODE_R));
}
#endif /* _VDEV_RAIDZ_MATH_IMPL_H */
diff --git a/sys/contrib/openzfs/module/zfs/zcp_synctask.c b/sys/contrib/openzfs/module/zfs/zcp_synctask.c
index 24210117eca0..058910054d97 100644
--- a/sys/contrib/openzfs/module/zfs/zcp_synctask.c
+++ b/sys/contrib/openzfs/module/zfs/zcp_synctask.c
@@ -1,549 +1,586 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
* Copyright 2020 Joyent, Inc.
*/
#include <sys/lua/lua.h>
#include <sys/lua/lauxlib.h>
#include <sys/zcp.h>
#include <sys/zcp_set.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfeature.h>
#include <sys/metaslab.h>
#define DST_AVG_BLKSHIFT 14
typedef struct zcp_inherit_prop_arg {
lua_State *zipa_state;
const char *zipa_prop;
dsl_props_set_arg_t zipa_dpsa;
} zcp_inherit_prop_arg_t;
typedef int (zcp_synctask_func_t)(lua_State *, boolean_t, nvlist_t *);
typedef struct zcp_synctask_info {
const char *name;
zcp_synctask_func_t *func;
const zcp_arg_t pargs[4];
const zcp_arg_t kwargs[2];
zfs_space_check_t space_check;
int blocks_modified;
} zcp_synctask_info_t;
static void
zcp_synctask_cleanup(void *arg)
{
fnvlist_free(arg);
}
/*
* Generic synctask interface for channel program syncfuncs.
*
* To perform some action in syncing context, we'd generally call
* dsl_sync_task(), but since the Lua script is already running inside a
* synctask we need to leave out some actions (such as acquiring the config
* rwlock and performing space checks).
*
* If 'sync' is false, executes a dry run and returns the error code.
*
* If we are not running in syncing context and we are not doing a dry run
* (meaning we are running a zfs.sync function in open-context) then we
* return a Lua error.
*
* This function also handles common fatal error cases for channel program
* library functions. If a fatal error occurs, err_dsname will be the dataset
* name reported in error messages, if supplied.
*/
static int
zcp_sync_task(lua_State *state, dsl_checkfunc_t *checkfunc,
dsl_syncfunc_t *syncfunc, void *arg, boolean_t sync, const char *err_dsname)
{
int err;
zcp_run_info_t *ri = zcp_run_info(state);
err = checkfunc(arg, ri->zri_tx);
if (!sync)
return (err);
if (!ri->zri_sync) {
return (luaL_error(state, "running functions from the zfs.sync "
"submodule requires passing sync=TRUE to "
"lzc_channel_program() (i.e. do not specify the \"-n\" "
"command line argument)"));
}
if (err == 0) {
syncfunc(arg, ri->zri_tx);
} else if (err == EIO) {
if (err_dsname != NULL) {
return (luaL_error(state,
"I/O error while accessing dataset '%s'",
err_dsname));
} else {
return (luaL_error(state,
"I/O error while accessing dataset."));
}
}
return (err);
}
static int zcp_synctask_destroy(lua_State *, boolean_t, nvlist_t *);
static const zcp_synctask_info_t zcp_synctask_destroy_info = {
.name = "destroy",
.func = zcp_synctask_destroy,
.pargs = {
{.za_name = "filesystem | snapshot", .za_lua_type = LUA_TSTRING },
{NULL, 0}
},
.kwargs = {
{.za_name = "defer", .za_lua_type = LUA_TBOOLEAN },
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_DESTROY,
.blocks_modified = 0
};
static int
zcp_synctask_destroy(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
(void) err_details;
int err;
const char *dsname = lua_tostring(state, 1);
boolean_t issnap = (strchr(dsname, '@') != NULL);
if (!issnap && !lua_isnil(state, 2)) {
return (luaL_error(state,
"'deferred' kwarg only supported for snapshots: %s",
dsname));
}
if (issnap) {
dsl_destroy_snapshot_arg_t ddsa = { 0 };
ddsa.ddsa_name = dsname;
if (!lua_isnil(state, 2)) {
ddsa.ddsa_defer = lua_toboolean(state, 2);
} else {
ddsa.ddsa_defer = B_FALSE;
}
err = zcp_sync_task(state, dsl_destroy_snapshot_check,
dsl_destroy_snapshot_sync, &ddsa, sync, dsname);
} else {
dsl_destroy_head_arg_t ddha = { 0 };
ddha.ddha_name = dsname;
err = zcp_sync_task(state, dsl_destroy_head_check,
dsl_destroy_head_sync, &ddha, sync, dsname);
}
return (err);
}
static int zcp_synctask_promote(lua_State *, boolean_t, nvlist_t *);
static const zcp_synctask_info_t zcp_synctask_promote_info = {
.name = "promote",
.func = zcp_synctask_promote,
.pargs = {
{.za_name = "clone", .za_lua_type = LUA_TSTRING },
{NULL, 0}
},
.kwargs = {
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 3
};
static int
zcp_synctask_promote(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
int err;
dsl_dataset_promote_arg_t ddpa = { 0 };
const char *dsname = lua_tostring(state, 1);
zcp_run_info_t *ri = zcp_run_info(state);
ddpa.ddpa_clonename = dsname;
ddpa.err_ds = err_details;
ddpa.cr = ri->zri_cred;
ddpa.proc = ri->zri_proc;
/*
* If there was a snapshot name conflict, then err_ds will be filled
* with a list of conflicting snapshot names.
*/
err = zcp_sync_task(state, dsl_dataset_promote_check,
dsl_dataset_promote_sync, &ddpa, sync, dsname);
return (err);
}
static int zcp_synctask_rollback(lua_State *, boolean_t, nvlist_t *err_details);
static const zcp_synctask_info_t zcp_synctask_rollback_info = {
.name = "rollback",
.func = zcp_synctask_rollback,
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 1,
.pargs = {
{.za_name = "filesystem", .za_lua_type = LUA_TSTRING },
{0, 0}
},
.kwargs = {
{0, 0}
}
};
static int
zcp_synctask_rollback(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
int err;
const char *dsname = lua_tostring(state, 1);
dsl_dataset_rollback_arg_t ddra = { 0 };
ddra.ddra_fsname = dsname;
ddra.ddra_result = err_details;
err = zcp_sync_task(state, dsl_dataset_rollback_check,
dsl_dataset_rollback_sync, &ddra, sync, dsname);
return (err);
}
static int zcp_synctask_snapshot(lua_State *, boolean_t, nvlist_t *);
static const zcp_synctask_info_t zcp_synctask_snapshot_info = {
.name = "snapshot",
.func = zcp_synctask_snapshot,
.pargs = {
{.za_name = "filesystem@snapname | volume@snapname",
.za_lua_type = LUA_TSTRING },
{NULL, 0}
},
.kwargs = {
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_NORMAL,
.blocks_modified = 3
};
static int
zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
(void) err_details;
int err;
dsl_dataset_snapshot_arg_t ddsa = { 0 };
const char *dsname = lua_tostring(state, 1);
zcp_run_info_t *ri = zcp_run_info(state);
/*
* On old pools, the ZIL must not be active when a snapshot is created,
* but we can't suspend the ZIL because we're already in syncing
* context.
*/
if (spa_version(ri->zri_pool->dp_spa) < SPA_VERSION_FAST_SNAP) {
return (SET_ERROR(ENOTSUP));
}
/*
* We only allow for a single snapshot rather than a list, so the
* error list output is unnecessary.
*/
ddsa.ddsa_errors = NULL;
ddsa.ddsa_props = NULL;
ddsa.ddsa_cr = ri->zri_cred;
ddsa.ddsa_proc = ri->zri_proc;
ddsa.ddsa_snaps = fnvlist_alloc();
fnvlist_add_boolean(ddsa.ddsa_snaps, dsname);
zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
zcp_synctask_cleanup, ddsa.ddsa_snaps);
err = zcp_sync_task(state, dsl_dataset_snapshot_check,
dsl_dataset_snapshot_sync, &ddsa, sync, dsname);
if (err == 0) {
/*
* We may need to create a new device minor node for this
* dataset (if it is a zvol and the "snapdev" property is set).
* Save it in the nvlist so that it can be processed in open
* context.
*/
fnvlist_add_boolean(ri->zri_new_zvols, dsname);
}
zcp_deregister_cleanup(state, zch);
fnvlist_free(ddsa.ddsa_snaps);
return (err);
}
+static int zcp_synctask_rename_snapshot(lua_State *, boolean_t, nvlist_t *);
+static const zcp_synctask_info_t zcp_synctask_rename_snapshot_info = {
+ .name = "rename_snapshot",
+ .func = zcp_synctask_rename_snapshot,
+ .pargs = {
+ {.za_name = "filesystem | volume", .za_lua_type = LUA_TSTRING },
+ {.za_name = "oldsnapname", .za_lua_type = LUA_TSTRING },
+ {.za_name = "newsnapname", .za_lua_type = LUA_TSTRING },
+ {NULL, 0}
+ },
+ .space_check = ZFS_SPACE_CHECK_RESERVED,
+ .blocks_modified = 1
+};
+
+static int
+zcp_synctask_rename_snapshot(lua_State *state, boolean_t sync,
+ nvlist_t *err_details)
+{
+ (void) err_details;
+ int err;
+ const char *fsname = lua_tostring(state, 1);
+ const char *oldsnapname = lua_tostring(state, 2);
+ const char *newsnapname = lua_tostring(state, 3);
+
+ struct dsl_dataset_rename_snapshot_arg ddrsa = { 0 };
+ ddrsa.ddrsa_fsname = fsname;
+ ddrsa.ddrsa_oldsnapname = oldsnapname;
+ ddrsa.ddrsa_newsnapname = newsnapname;
+ ddrsa.ddrsa_recursive = B_FALSE;
+
+ err = zcp_sync_task(state, dsl_dataset_rename_snapshot_check,
+ dsl_dataset_rename_snapshot_sync, &ddrsa, sync, NULL);
+
+ return (err);
+}
+
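+/*
+ * Expected channel program usage (pool, filesystem, and snapshot names
+ * below are placeholders):
+ *
+ *	zfs.sync.rename_snapshot("pool/fs", "oldsnap", "newsnap")
+ *
+ * with the three positional string arguments matching the pargs above.
+ */
+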
static int zcp_synctask_inherit_prop(lua_State *, boolean_t,
nvlist_t *err_details);
static const zcp_synctask_info_t zcp_synctask_inherit_prop_info = {
.name = "inherit",
.func = zcp_synctask_inherit_prop,
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 2, /* 2 * numprops */
.pargs = {
{ .za_name = "dataset", .za_lua_type = LUA_TSTRING },
{ .za_name = "property", .za_lua_type = LUA_TSTRING },
{ NULL, 0 }
},
.kwargs = {
{ NULL, 0 }
},
};
static int
zcp_synctask_inherit_prop_check(void *arg, dmu_tx_t *tx)
{
zcp_inherit_prop_arg_t *args = arg;
zfs_prop_t prop = zfs_name_to_prop(args->zipa_prop);
if (prop == ZPROP_USERPROP) {
if (zfs_prop_user(args->zipa_prop))
return (0);
return (EINVAL);
}
if (zfs_prop_readonly(prop))
return (EINVAL);
if (!zfs_prop_inheritable(prop))
return (EINVAL);
return (dsl_props_set_check(&args->zipa_dpsa, tx));
}
static void
zcp_synctask_inherit_prop_sync(void *arg, dmu_tx_t *tx)
{
zcp_inherit_prop_arg_t *args = arg;
dsl_props_set_arg_t *dpsa = &args->zipa_dpsa;
dsl_props_set_sync(dpsa, tx);
}
static int
zcp_synctask_inherit_prop(lua_State *state, boolean_t sync,
nvlist_t *err_details)
{
(void) err_details;
int err;
zcp_inherit_prop_arg_t zipa = { 0 };
dsl_props_set_arg_t *dpsa = &zipa.zipa_dpsa;
const char *dsname = lua_tostring(state, 1);
const char *prop = lua_tostring(state, 2);
zipa.zipa_state = state;
zipa.zipa_prop = prop;
dpsa->dpsa_dsname = dsname;
dpsa->dpsa_source = ZPROP_SRC_INHERITED;
dpsa->dpsa_props = fnvlist_alloc();
fnvlist_add_boolean(dpsa->dpsa_props, prop);
zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
zcp_synctask_cleanup, dpsa->dpsa_props);
err = zcp_sync_task(state, zcp_synctask_inherit_prop_check,
zcp_synctask_inherit_prop_sync, &zipa, sync, dsname);
zcp_deregister_cleanup(state, zch);
fnvlist_free(dpsa->dpsa_props);
return (err);
}
static int zcp_synctask_bookmark(lua_State *, boolean_t, nvlist_t *);
static const zcp_synctask_info_t zcp_synctask_bookmark_info = {
.name = "bookmark",
.func = zcp_synctask_bookmark,
.pargs = {
{.za_name = "snapshot | bookmark", .za_lua_type = LUA_TSTRING },
{.za_name = "bookmark", .za_lua_type = LUA_TSTRING },
{NULL, 0}
},
.kwargs = {
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_NORMAL,
.blocks_modified = 1,
};
static int
zcp_synctask_bookmark(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
(void) err_details;
int err;
const char *source = lua_tostring(state, 1);
const char *new = lua_tostring(state, 2);
nvlist_t *bmarks = fnvlist_alloc();
fnvlist_add_string(bmarks, new, source);
zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
zcp_synctask_cleanup, bmarks);
dsl_bookmark_create_arg_t dbca = {
.dbca_bmarks = bmarks,
.dbca_errors = NULL,
};
err = zcp_sync_task(state, dsl_bookmark_create_check,
dsl_bookmark_create_sync, &dbca, sync, source);
zcp_deregister_cleanup(state, zch);
fnvlist_free(bmarks);
return (err);
}
static int zcp_synctask_set_prop(lua_State *, boolean_t, nvlist_t *err_details);
static const zcp_synctask_info_t zcp_synctask_set_prop_info = {
.name = "set_prop",
.func = zcp_synctask_set_prop,
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 2,
.pargs = {
{ .za_name = "dataset", .za_lua_type = LUA_TSTRING },
{ .za_name = "property", .za_lua_type = LUA_TSTRING },
{ .za_name = "value", .za_lua_type = LUA_TSTRING },
{ NULL, 0 }
},
.kwargs = {
{ NULL, 0 }
}
};
static int
zcp_synctask_set_prop(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
(void) err_details;
int err;
zcp_set_prop_arg_t args = { 0 };
const char *dsname = lua_tostring(state, 1);
const char *prop = lua_tostring(state, 2);
const char *val = lua_tostring(state, 3);
args.state = state;
args.dsname = dsname;
args.prop = prop;
args.val = val;
err = zcp_sync_task(state, zcp_set_prop_check, zcp_set_prop_sync,
&args, sync, dsname);
return (err);
}
static int
zcp_synctask_wrapper(lua_State *state)
{
int err;
zcp_cleanup_handler_t *zch;
int num_ret = 1;
nvlist_t *err_details = fnvlist_alloc();
/*
* Make sure err_details is properly freed, even if a fatal error is
* thrown during the synctask.
*/
zch = zcp_register_cleanup(state, zcp_synctask_cleanup, err_details);
zcp_synctask_info_t *info = lua_touserdata(state, lua_upvalueindex(1));
boolean_t sync = lua_toboolean(state, lua_upvalueindex(2));
zcp_run_info_t *ri = zcp_run_info(state);
dsl_pool_t *dp = ri->zri_pool;
/* MOS space is triple-dittoed, so we multiply by 3. */
uint64_t funcspace =
((uint64_t)info->blocks_modified << DST_AVG_BLKSHIFT) * 3;
zcp_parse_args(state, info->name, info->pargs, info->kwargs);
err = 0;
if (info->space_check != ZFS_SPACE_CHECK_NONE) {
uint64_t quota = dsl_pool_unreserved_space(dp,
info->space_check);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes +
ri->zri_space_used;
if (used + funcspace > quota) {
err = SET_ERROR(ENOSPC);
}
}
if (err == 0) {
err = info->func(state, sync, err_details);
}
if (err == 0) {
ri->zri_space_used += funcspace;
}
lua_pushnumber(state, (lua_Number)err);
if (fnvlist_num_pairs(err_details) > 0) {
(void) zcp_nvlist_to_lua(state, err_details, NULL, 0);
num_ret++;
}
zcp_deregister_cleanup(state, zch);
fnvlist_free(err_details);
return (num_ret);
}
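/*
 * Worked example of the space charge above, assuming DST_AVG_BLKSHIFT is 14
 * (a 16 KiB average block): the "snapshot" task declares blocks_modified = 3,
 * so funcspace = (3 << 14) * 3 = 147456 bytes (144 KiB) is checked against
 * the pool's unreserved space and, on success, added to zri_space_used.
 */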
int
zcp_load_synctask_lib(lua_State *state, boolean_t sync)
{
const zcp_synctask_info_t *zcp_synctask_funcs[] = {
&zcp_synctask_destroy_info,
&zcp_synctask_promote_info,
&zcp_synctask_rollback_info,
&zcp_synctask_snapshot_info,
+ &zcp_synctask_rename_snapshot_info,
&zcp_synctask_inherit_prop_info,
&zcp_synctask_bookmark_info,
&zcp_synctask_set_prop_info,
NULL
};
lua_newtable(state);
for (int i = 0; zcp_synctask_funcs[i] != NULL; i++) {
const zcp_synctask_info_t *info = zcp_synctask_funcs[i];
lua_pushlightuserdata(state, (void *)(uintptr_t)info);
lua_pushboolean(state, sync);
lua_pushcclosure(state, &zcp_synctask_wrapper, 2);
lua_setfield(state, -2, info->name);
}
return (1);
}
diff --git a/sys/contrib/openzfs/module/zfs/zfs_chksum.c b/sys/contrib/openzfs/module/zfs/zfs_chksum.c
index b9dc907afa8d..74b4cb8d2e63 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_chksum.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_chksum.c
@@ -1,355 +1,357 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#include <sys/types.h>
#include <sys/spa.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/blake3.h>
/* limit benchmarking to max 256KiB, when EdonR is slower than this: */
#define LIMIT_PERF_MBS 300
typedef struct {
const char *name;
const char *impl;
uint64_t bs1k;
uint64_t bs4k;
uint64_t bs16k;
uint64_t bs64k;
uint64_t bs256k;
uint64_t bs1m;
uint64_t bs4m;
uint64_t bs16m;
zio_cksum_salt_t salt;
zio_checksum_t *(func);
zio_checksum_tmpl_init_t *(init);
zio_checksum_tmpl_free_t *(free);
} chksum_stat_t;
static chksum_stat_t *chksum_stat_data = 0;
static int chksum_stat_cnt = 0;
static kstat_t *chksum_kstat = NULL;
/*
* i3-1005G1 test output:
*
* implementation 1k 4k 16k 64k 256k 1m 4m
* fletcher-4 5421 15001 26468 32555 34720 32801 18847
* edonr-generic 1196 1602 1761 1749 1762 1759 1751
* skein-generic 546 591 608 615 619 612 616
* sha256-generic 246 270 274 274 277 275 276
* sha256-avx 262 296 304 307 307 307 306
* sha256-sha-ni 769 1072 1172 1220 1219 1232 1228
* sha256-openssl 240 300 316 314 304 285 276
* sha512-generic 333 374 385 392 391 393 392
* sha512-openssl 353 441 467 476 472 467 426
* sha512-avx 362 444 473 475 479 476 478
* sha512-avx2 394 500 530 538 543 545 542
* blake3-generic 308 313 313 313 312 313 312
* blake3-sse2 402 1289 1423 1446 1432 1458 1413
* blake3-sse41 427 1470 1625 1704 1679 1607 1629
* blake3-avx2 428 1920 3095 3343 3356 3318 3204
* blake3-avx512 473 2687 4905 5836 5844 5643 5374
*/
static int
chksum_kstat_headers(char *buf, size_t size)
{
ssize_t off = 0;
off += snprintf(buf + off, size, "%-23s", "implementation");
off += snprintf(buf + off, size - off, "%8s", "1k");
off += snprintf(buf + off, size - off, "%8s", "4k");
off += snprintf(buf + off, size - off, "%8s", "16k");
off += snprintf(buf + off, size - off, "%8s", "64k");
off += snprintf(buf + off, size - off, "%8s", "256k");
off += snprintf(buf + off, size - off, "%8s", "1m");
off += snprintf(buf + off, size - off, "%8s", "4m");
(void) snprintf(buf + off, size - off, "%8s\n", "16m");
return (0);
}
static int
chksum_kstat_data(char *buf, size_t size, void *data)
{
chksum_stat_t *cs;
ssize_t off = 0;
char b[24];
cs = (chksum_stat_t *)data;
snprintf(b, 23, "%s-%s", cs->name, cs->impl);
off += snprintf(buf + off, size - off, "%-23s", b);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs1k);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs4k);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs16k);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs64k);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs256k);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs1m);
off += snprintf(buf + off, size - off, "%8llu",
(u_longlong_t)cs->bs4m);
(void) snprintf(buf + off, size - off, "%8llu\n",
(u_longlong_t)cs->bs16m);
return (0);
}
static void *
chksum_kstat_addr(kstat_t *ksp, loff_t n)
{
if (n < chksum_stat_cnt)
ksp->ks_private = (void *)(chksum_stat_data + n);
else
ksp->ks_private = NULL;
return (ksp->ks_private);
}
static void
chksum_run(chksum_stat_t *cs, abd_t *abd, void *ctx, int round,
uint64_t *result)
{
hrtime_t start;
uint64_t run_bw, run_time_ns, run_count = 0, size = 0;
uint32_t l, loops = 0;
zio_cksum_t zcp;
switch (round) {
case 1: /* 1k */
size = 1<<10; loops = 128; break;
case 2: /* 4k */
size = 1<<12; loops = 64; break;
case 3: /* 16k */
size = 1<<14; loops = 32; break;
case 4: /* 64k */
size = 1<<16; loops = 16; break;
case 5: /* 256k */
size = 1<<18; loops = 8; break;
case 6: /* 1m */
size = 1<<20; loops = 4; break;
case 7: /* 4m */
size = 1<<22; loops = 1; break;
case 8: /* 16m */
size = 1<<24; loops = 1; break;
}
kpreempt_disable();
start = gethrtime();
do {
for (l = 0; l < loops; l++, run_count++)
cs->func(abd, size, ctx, &zcp);
run_time_ns = gethrtime() - start;
} while (run_time_ns < MSEC2NSEC(1));
kpreempt_enable();
run_bw = size * run_count * NANOSEC;
run_bw /= run_time_ns; /* B/s */
*result = run_bw/1024/1024; /* MiB/s */
}
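/*
 * Example of the bandwidth math above: a 256 KiB buffer (size = 1<<18)
 * checksummed 40 times within run_time_ns = 1000000 ns gives
 * run_bw = 262144 * 40 * NANOSEC / 1000000 = 10485760000 B/s,
 * reported as 10485760000 / 1024 / 1024 = 10000 MiB/s.
 */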
#define LIMIT_INIT 0
#define LIMIT_NEEDED 1
#define LIMIT_NOLIMIT 2
static void
chksum_benchit(chksum_stat_t *cs)
{
abd_t *abd;
void *ctx = 0;
void *salt = &cs->salt.zcs_bytes;
static int chksum_stat_limit = LIMIT_INIT;
memset(salt, 0, sizeof (cs->salt.zcs_bytes));
if (cs->init)
ctx = cs->init(&cs->salt);
/* allocate test memory via abd linear interface */
abd = abd_alloc_linear(1<<20, B_FALSE);
chksum_run(cs, abd, ctx, 1, &cs->bs1k);
chksum_run(cs, abd, ctx, 2, &cs->bs4k);
chksum_run(cs, abd, ctx, 3, &cs->bs16k);
chksum_run(cs, abd, ctx, 4, &cs->bs64k);
chksum_run(cs, abd, ctx, 5, &cs->bs256k);
/* check if we ran on a slow cpu */
if (chksum_stat_limit == LIMIT_INIT) {
if (cs->bs1k < LIMIT_PERF_MBS) {
chksum_stat_limit = LIMIT_NEEDED;
} else {
chksum_stat_limit = LIMIT_NOLIMIT;
}
}
/* skip benchmarks >= 1 MiB when the CPU is too slow */
if (chksum_stat_limit == LIMIT_NEEDED)
goto abort;
chksum_run(cs, abd, ctx, 6, &cs->bs1m);
abd_free(abd);
/* allocate test memory via abd non-linear interface */
abd = abd_alloc(1<<24, B_FALSE);
chksum_run(cs, abd, ctx, 7, &cs->bs4m);
chksum_run(cs, abd, ctx, 8, &cs->bs16m);
abort:
abd_free(abd);
/* free up temp memory */
if (cs->free)
cs->free(ctx);
}
/*
* Initialize and benchmark all supported implementations.
*/
static void
chksum_benchmark(void)
{
#ifndef _KERNEL
/* we need the benchmark only for the kernel module */
return;
#endif
chksum_stat_t *cs;
- int cbid = 0, id;
+ int cbid = 0;
uint64_t max = 0;
+ uint32_t id, id_save;
/* space for the benchmark times */
chksum_stat_cnt = 4;
- chksum_stat_cnt += blake3_get_impl_count();
+ chksum_stat_cnt += blake3_impl_getcnt();
chksum_stat_data = (chksum_stat_t *)kmem_zalloc(
sizeof (chksum_stat_t) * chksum_stat_cnt, KM_SLEEP);
/* edonr - needs to be the first one here (slow CPU check) */
cs = &chksum_stat_data[cbid++];
cs->init = abd_checksum_edonr_tmpl_init;
cs->func = abd_checksum_edonr_native;
cs->free = abd_checksum_edonr_tmpl_free;
cs->name = "edonr";
cs->impl = "generic";
chksum_benchit(cs);
/* skein */
cs = &chksum_stat_data[cbid++];
cs->init = abd_checksum_skein_tmpl_init;
cs->func = abd_checksum_skein_native;
cs->free = abd_checksum_skein_tmpl_free;
cs->name = "skein";
cs->impl = "generic";
chksum_benchit(cs);
/* sha256 */
cs = &chksum_stat_data[cbid++];
cs->init = 0;
cs->func = abd_checksum_SHA256;
cs->free = 0;
cs->name = "sha256";
cs->impl = "generic";
chksum_benchit(cs);
/* sha512 */
cs = &chksum_stat_data[cbid++];
cs->init = 0;
cs->func = abd_checksum_SHA512_native;
cs->free = 0;
cs->name = "sha512";
cs->impl = "generic";
chksum_benchit(cs);
/* blake3 */
- for (id = 0; id < blake3_get_impl_count(); id++) {
- blake3_set_impl_id(id);
+ id_save = blake3_impl_getid();
+ for (id = 0; id < blake3_impl_getcnt(); id++) {
+ blake3_impl_setid(id);
cs = &chksum_stat_data[cbid++];
cs->init = abd_checksum_blake3_tmpl_init;
cs->func = abd_checksum_blake3_native;
cs->free = abd_checksum_blake3_tmpl_free;
cs->name = "blake3";
- cs->impl = blake3_get_impl_name();
+ cs->impl = blake3_impl_getname();
chksum_benchit(cs);
if (cs->bs256k > max) {
max = cs->bs256k;
- blake3_set_impl_fastest(id);
+ blake3_impl_set_fastest(id);
}
}
+
+ /* restore initial value */
+ blake3_impl_setid(id_save);
}
void
chksum_init(void)
{
#ifdef _KERNEL
blake3_per_cpu_ctx_init();
#endif
/* Benchmark supported implementations */
chksum_benchmark();
/* Install kstats for all implementations */
chksum_kstat = kstat_create("zfs", 0, "chksum_bench", "misc",
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (chksum_kstat != NULL) {
chksum_kstat->ks_data = NULL;
chksum_kstat->ks_ndata = UINT32_MAX;
kstat_set_raw_ops(chksum_kstat,
chksum_kstat_headers,
chksum_kstat_data,
chksum_kstat_addr);
kstat_install(chksum_kstat);
}
-
- /* setup implementations */
- blake3_setup_impl();
}
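/*
 * The kstat registered above exposes the benchmark table in raw form; it is
 * typically readable as kstat.zfs.misc.chksum_bench via sysctl on FreeBSD or
 * as /proc/spl/kstat/zfs/chksum_bench on Linux (the exact paths come from the
 * platform kstat glue, not from this file).
 */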
void
chksum_fini(void)
{
if (chksum_kstat != NULL) {
kstat_delete(chksum_kstat);
chksum_kstat = NULL;
}
if (chksum_stat_cnt) {
kmem_free(chksum_stat_data,
sizeof (chksum_stat_t) * chksum_stat_cnt);
chksum_stat_cnt = 0;
chksum_stat_data = 0;
}
#ifdef _KERNEL
blake3_per_cpu_ctx_fini();
#endif
}
diff --git a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
index 382975208b97..259d68c477de 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
@@ -1,7872 +1,7887 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright 2011 Martin Matuska
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Portions Copyright 2012 Pawel Jakub Dawidek <pawel@dawidek.net>
* Copyright (c) 2014, 2016 Joyent, Inc. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
* Copyright (c) 2019, 2021, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
/*
* ZFS ioctls.
*
* This file handles the ioctls to /dev/zfs, used for configuring ZFS storage
* pools and filesystems, e.g. with /sbin/zfs and /sbin/zpool.
*
* There are two ways that we handle ioctls: the legacy way where almost
* all of the logic is in the ioctl callback, and the new way where most
* of the marshalling is handled in the common entry point, zfsdev_ioctl().
*
* Non-legacy ioctls should be registered by calling
* zfs_ioctl_register() from zfs_ioctl_init(). The ioctl is invoked
* from userland by lzc_ioctl().
*
* The registration arguments are as follows:
*
* const char *name
* The name of the ioctl. This is used for history logging. If the
* ioctl returns successfully (the callback returns 0), and allow_log
* is true, then a history log entry will be recorded with the input &
* output nvlists. The log entry can be printed with "zpool history -i".
*
* zfs_ioc_t ioc
* The ioctl request number, which userland will pass to ioctl(2).
* We want newer versions of libzfs and libzfs_core to run against
* existing zfs kernel modules (i.e. a deferred reboot after an update).
* Therefore the ioctl numbers cannot change from release to release.
*
* zfs_secpolicy_func_t *secpolicy
* This function will be called before the zfs_ioc_func_t, to
* determine if this operation is permitted. It should return EPERM
* on failure, and 0 on success. Checks include determining if the
* dataset is visible in this zone, and if the user has either all
* zfs privileges in the zone (SYS_MOUNT), or has been granted permission
* to do this operation on this dataset with "zfs allow".
*
* zfs_ioc_namecheck_t namecheck
* This specifies what to expect in the zfs_cmd_t:zc_name -- a pool
* name, a dataset name, or nothing. If the name is not well-formed,
* the ioctl will fail and the callback will not be called.
* Therefore, the callback can assume that the name is well-formed
* (e.g. is null-terminated, doesn't have more than one '@' character,
* doesn't have invalid characters).
*
* zfs_ioc_poolcheck_t pool_check
* This specifies requirements on the pool state. If the pool does
* not meet them (is suspended or is readonly), the ioctl will fail
* and the callback will not be called. If any checks are specified
* (i.e. it is not POOL_CHECK_NONE), namecheck must not be NO_NAME.
* Multiple checks can be or-ed together (e.g. POOL_CHECK_SUSPENDED |
* POOL_CHECK_READONLY).
*
* zfs_ioc_key_t *nvl_keys
* The list of expected/allowable innvl input keys. This list is used
* to validate the nvlist input to the ioctl.
*
* boolean_t smush_outnvlist
* If smush_outnvlist is true, then the output is presumed to be a
* list of errors, and it will be "smushed" down to fit into the
* caller's buffer, by removing some entries and replacing them with a
* single "N_MORE_ERRORS" entry indicating how many were removed. See
* nvlist_smush() for details. If smush_outnvlist is false, and the
* outnvlist does not fit into the userland-provided buffer, then the
* ioctl will fail with ENOMEM.
*
* zfs_ioc_func_t *func
* The callback function that will perform the operation.
*
* The callback should return 0 on success, or an error number on
* failure. If the function fails, the userland ioctl will return -1,
* and errno will be set to the callback's return value. The callback
* will be called with the following arguments:
*
* const char *name
* The name of the pool or dataset to operate on, from
* zfs_cmd_t:zc_name. The 'namecheck' argument specifies the
* expected type (pool, dataset, or none).
*
* nvlist_t *innvl
* The input nvlist, deserialized from zfs_cmd_t:zc_nvlist_src. Or
* NULL if no input nvlist was provided. Changes to this nvlist are
* ignored. If the input nvlist could not be deserialized, the
* ioctl will fail and the callback will not be called.
*
* nvlist_t *outnvl
* The output nvlist, initially empty. The callback can fill it in,
* and it will be returned to userland by serializing it into
* zfs_cmd_t:zc_nvlist_dst. If it is non-empty, and serialization
* fails (e.g. because the caller didn't supply a large enough
* buffer), then the overall ioctl will fail. See the
* 'smush_outnvlist' argument above for additional behaviors.
*
* There are two typical uses of the output nvlist:
* - To return state, e.g. property values. In this case,
* smush_outnvlist should be false. If the buffer was not large
* enough, the caller will reallocate a larger buffer and try
* the ioctl again.
*
* - To return multiple errors from an ioctl which makes on-disk
* changes. In this case, smush_outnvlist should be true.
* Ioctls which make on-disk modifications should generally not
* use the outnvl if they succeed, because the caller can not
* distinguish between the operation failing, and
* deserialization failing.
*
* IOCTL Interface Errors
*
* The following ioctl input errors can be returned:
* ZFS_ERR_IOC_CMD_UNAVAIL the ioctl number is not supported by kernel
* ZFS_ERR_IOC_ARG_UNAVAIL an input argument is not supported by kernel
* ZFS_ERR_IOC_ARG_REQUIRED a required input argument is missing
* ZFS_ERR_IOC_ARG_BADTYPE an input argument has an invalid type
*/
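/*
 * Sketch of a non-legacy registration following the description above; the
 * ioctl number, callback, and key-list names are placeholders, and
 * zfs_ioctl_register(), defined later in this file, is the authoritative
 * signature:
 *
 *	static const zfs_ioc_key_t zfs_keys_example[] = {
 *		{"snaps", DATA_TYPE_NVLIST, 0},
 *	};
 *
 *	zfs_ioctl_register("example", ZFS_IOC_EXAMPLE,
 *	    zfs_ioc_example, zfs_secpolicy_config, POOL_NAME,
 *	    POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
 *	    zfs_keys_example, ARRAY_SIZE(zfs_keys_example));
 */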
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dmu.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_redact.h>
#include <sys/dmu_tx.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/nvpair.h>
#include <sys/pathname.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/fm/util.h>
#include <sys/dsl_crypt.h>
#include <sys/rrwlock.h>
#include <sys/zfs_file.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zcp.h>
#include <sys/zio_checksum.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_deleg.h"
#include "zfs_comutil.h"
#include <sys/lua/lua.h>
#include <sys/lua/lauxlib.h>
#include <sys/zfs_ioctl_impl.h>
kmutex_t zfsdev_state_lock;
static zfsdev_state_t *zfsdev_state_list;
/*
* Limit maximum nvlist size. We don't want users passing in insane values
* for zc->zc_nvlist_src_size, since we will need to allocate that much memory.
* Defaults to 0=auto which is handled by platform code.
*/
unsigned long zfs_max_nvlist_src_size = 0;
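/*
 * Note: this knob is typically surfaced by the platform module-parameter
 * glue, e.g. as the zfs_max_nvlist_src_size module parameter on Linux or the
 * vfs.zfs.max_nvlist_src_size sysctl on FreeBSD; the exact names depend on
 * that platform code.
 */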
/*
* When logging the output nvlist of an ioctl in the on-disk history, limit
* the logged size to this many bytes. This must be less than DMU_MAX_ACCESS.
* This applies primarily to zfs_ioc_channel_program().
*/
static unsigned long zfs_history_output_max = 1024 * 1024;
uint_t zfs_fsyncer_key;
uint_t zfs_allow_log_key;
/* DATA_TYPE_ANY is used when zkey_type can vary. */
#define DATA_TYPE_ANY DATA_TYPE_UNKNOWN
typedef struct zfs_ioc_vec {
zfs_ioc_legacy_func_t *zvec_legacy_func;
zfs_ioc_func_t *zvec_func;
zfs_secpolicy_func_t *zvec_secpolicy;
zfs_ioc_namecheck_t zvec_namecheck;
boolean_t zvec_allow_log;
zfs_ioc_poolcheck_t zvec_pool_check;
boolean_t zvec_smush_outnvlist;
const char *zvec_name;
const zfs_ioc_key_t *zvec_nvl_keys;
size_t zvec_nvl_key_count;
} zfs_ioc_vec_t;
/* This array is indexed by zfs_userquota_prop_t */
static const char *userquota_perms[] = {
ZFS_DELEG_PERM_USERUSED,
ZFS_DELEG_PERM_USERQUOTA,
ZFS_DELEG_PERM_GROUPUSED,
ZFS_DELEG_PERM_GROUPQUOTA,
ZFS_DELEG_PERM_USEROBJUSED,
ZFS_DELEG_PERM_USEROBJQUOTA,
ZFS_DELEG_PERM_GROUPOBJUSED,
ZFS_DELEG_PERM_GROUPOBJQUOTA,
ZFS_DELEG_PERM_PROJECTUSED,
ZFS_DELEG_PERM_PROJECTQUOTA,
ZFS_DELEG_PERM_PROJECTOBJUSED,
ZFS_DELEG_PERM_PROJECTOBJQUOTA,
};
static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc);
static int zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc);
static int zfs_check_settable(const char *name, nvpair_t *property,
cred_t *cr);
static int zfs_check_clearable(const char *dataset, nvlist_t *props,
nvlist_t **errors);
static int zfs_fill_zplprops_root(uint64_t, nvlist_t *, nvlist_t *,
boolean_t *);
int zfs_set_prop_nvlist(const char *, zprop_source_t, nvlist_t *, nvlist_t *);
static int get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp);
static void
history_str_free(char *buf)
{
kmem_free(buf, HIS_MAX_RECORD_LEN);
}
static char *
history_str_get(zfs_cmd_t *zc)
{
char *buf;
if (zc->zc_history == 0)
return (NULL);
buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
if (copyinstr((void *)(uintptr_t)zc->zc_history,
buf, HIS_MAX_RECORD_LEN, NULL) != 0) {
history_str_free(buf);
return (NULL);
}
buf[HIS_MAX_RECORD_LEN -1] = '\0';
return (buf);
}
/*
* Return non-zero if the spa version is less than requested version.
*/
static int
zfs_earlier_version(const char *name, int version)
{
spa_t *spa;
if (spa_open(name, &spa, FTAG) == 0) {
if (spa_version(spa) < version) {
spa_close(spa, FTAG);
return (1);
}
spa_close(spa, FTAG);
}
return (0);
}
/*
* Return TRUE if the ZPL version is less than requested version.
*/
static boolean_t
zpl_earlier_version(const char *name, int version)
{
objset_t *os;
boolean_t rc = B_TRUE;
if (dmu_objset_hold(name, FTAG, &os) == 0) {
uint64_t zplversion;
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dmu_objset_rele(os, FTAG);
return (B_TRUE);
}
/* XXX reading from non-owned objset */
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplversion) == 0)
rc = zplversion < version;
dmu_objset_rele(os, FTAG);
}
return (rc);
}
static void
zfs_log_history(zfs_cmd_t *zc)
{
spa_t *spa;
char *buf;
if ((buf = history_str_get(zc)) == NULL)
return;
if (spa_open(zc->zc_name, &spa, FTAG) == 0) {
if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY)
(void) spa_history_log(spa, buf);
spa_close(spa, FTAG);
}
history_str_free(buf);
}
/*
* Policy for top-level read operations (list pools). Requires no privileges,
* and can be used in the local zone, as there is no associated dataset.
*/
static int
zfs_secpolicy_none(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc, (void) innvl, (void) cr;
return (0);
}
/*
* Policy for dataset read operations (list children, get statistics). Requires
* no privileges, but must be visible in the local zone.
*/
static int
zfs_secpolicy_read(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl, (void) cr;
if (INGLOBALZONE(curproc) ||
zone_dataset_visible(zc->zc_name, NULL))
return (0);
return (SET_ERROR(ENOENT));
}
static int
zfs_dozonecheck_impl(const char *dataset, uint64_t zoned, cred_t *cr)
{
int writable = 1;
/*
* The dataset must be visible by this zone -- check this first
* so they don't see EPERM on something they shouldn't know about.
*/
if (!INGLOBALZONE(curproc) &&
!zone_dataset_visible(dataset, &writable))
return (SET_ERROR(ENOENT));
if (INGLOBALZONE(curproc)) {
/*
* If the fs is zoned, only root can access it from the
* global zone.
*/
if (secpolicy_zfs(cr) && zoned)
return (SET_ERROR(EPERM));
} else {
/*
* If we are in a local zone, the 'zoned' property must be set.
*/
if (!zoned)
return (SET_ERROR(EPERM));
/* must be writable by this zone */
if (!writable)
return (SET_ERROR(EPERM));
}
return (0);
}
static int
zfs_dozonecheck(const char *dataset, cred_t *cr)
{
uint64_t zoned;
if (dsl_prop_get_integer(dataset, zfs_prop_to_name(ZFS_PROP_ZONED),
&zoned, NULL))
return (SET_ERROR(ENOENT));
return (zfs_dozonecheck_impl(dataset, zoned, cr));
}
static int
zfs_dozonecheck_ds(const char *dataset, dsl_dataset_t *ds, cred_t *cr)
{
uint64_t zoned;
if (dsl_prop_get_int_ds(ds, zfs_prop_to_name(ZFS_PROP_ZONED), &zoned))
return (SET_ERROR(ENOENT));
return (zfs_dozonecheck_impl(dataset, zoned, cr));
}
static int
zfs_secpolicy_write_perms_ds(const char *name, dsl_dataset_t *ds,
const char *perm, cred_t *cr)
{
int error;
error = zfs_dozonecheck_ds(name, ds, cr);
if (error == 0) {
error = secpolicy_zfs(cr);
if (error != 0)
error = dsl_deleg_access_impl(ds, perm, cr);
}
return (error);
}
static int
zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr)
{
int error;
dsl_dataset_t *ds;
dsl_pool_t *dp;
/*
* First do a quick check for root in the global zone, which
* is allowed to do all write_perms. This ensures that zfs_ioc_*
* will get to handle nonexistent datasets.
*/
if (INGLOBALZONE(curproc) && secpolicy_zfs(cr) == 0)
return (0);
error = dsl_pool_hold(name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
error = zfs_secpolicy_write_perms_ds(name, ds, perm, cr);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
/*
* Policy for setting the security label property.
*
* Returns 0 for success, non-zero for access and other errors.
*/
static int
zfs_set_slabel_policy(const char *name, const char *strval, cred_t *cr)
{
#ifdef HAVE_MLSLABEL
char ds_hexsl[MAXNAMELEN];
bslabel_t ds_sl, new_sl;
boolean_t new_default = FALSE;
uint64_t zoned;
int needed_priv = -1;
int error;
/* First get the existing dataset label. */
error = dsl_prop_get(name, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
1, sizeof (ds_hexsl), &ds_hexsl, NULL);
if (error != 0)
return (SET_ERROR(EPERM));
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
new_default = TRUE;
/* The label must be translatable */
if (!new_default && (hexstr_to_label(strval, &new_sl) != 0))
return (SET_ERROR(EINVAL));
/*
* In a non-global zone, disallow attempts to set a label that
* doesn't match that of the zone; otherwise no other checks
* are needed.
*/
if (!INGLOBALZONE(curproc)) {
if (new_default || !blequal(&new_sl, CR_SL(CRED())))
return (SET_ERROR(EPERM));
return (0);
}
/*
* For global-zone datasets (i.e., those whose zoned property is
* "off"), verify that the specified new label is valid for the
* global zone.
*/
if (dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL))
return (SET_ERROR(EPERM));
if (!zoned) {
if (zfs_check_global_label(name, strval) != 0)
return (SET_ERROR(EPERM));
}
/*
* If the existing dataset label is nondefault, check if the
* dataset is mounted (label cannot be changed while mounted).
* Get the zfsvfs_t; if there isn't one, then the dataset isn't
* mounted (or isn't a dataset, doesn't exist, ...).
*/
if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) {
objset_t *os;
static const char *setsl_tag = "setsl_tag";
/*
* Try to own the dataset; abort if there is any error,
* (e.g., already mounted, in use, or other error).
*/
error = dmu_objset_own(name, DMU_OST_ZFS, B_TRUE, B_TRUE,
setsl_tag, &os);
if (error != 0)
return (SET_ERROR(EPERM));
dmu_objset_disown(os, B_TRUE, setsl_tag);
if (new_default) {
needed_priv = PRIV_FILE_DOWNGRADE_SL;
goto out_check;
}
if (hexstr_to_label(strval, &new_sl) != 0)
return (SET_ERROR(EPERM));
if (blstrictdom(&ds_sl, &new_sl))
needed_priv = PRIV_FILE_DOWNGRADE_SL;
else if (blstrictdom(&new_sl, &ds_sl))
needed_priv = PRIV_FILE_UPGRADE_SL;
} else {
/* dataset currently has a default label */
if (!new_default)
needed_priv = PRIV_FILE_UPGRADE_SL;
}
out_check:
if (needed_priv != -1)
return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL));
return (0);
#else
return (SET_ERROR(ENOTSUP));
#endif /* HAVE_MLSLABEL */
}
static int
zfs_secpolicy_setprop(const char *dsname, zfs_prop_t prop, nvpair_t *propval,
cred_t *cr)
{
char *strval;
/*
* Check permissions for special properties.
*/
switch (prop) {
default:
break;
case ZFS_PROP_ZONED:
/*
* Disallow setting of 'zoned' from within a local zone.
*/
if (!INGLOBALZONE(curproc))
return (SET_ERROR(EPERM));
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
if (!INGLOBALZONE(curproc)) {
uint64_t zoned;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
/*
* Unprivileged users are allowed to modify the
* limit on things *under* (ie. contained by)
* the thing they own.
*/
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, setpoint))
return (SET_ERROR(EPERM));
if (!zoned || strlen(dsname) <= strlen(setpoint))
return (SET_ERROR(EPERM));
}
break;
case ZFS_PROP_MLSLABEL:
if (!is_system_labeled())
return (SET_ERROR(EPERM));
if (nvpair_value_string(propval, &strval) == 0) {
int err;
err = zfs_set_slabel_policy(dsname, strval, CRED());
if (err != 0)
return (err);
}
break;
}
return (zfs_secpolicy_write_perms(dsname, zfs_prop_to_name(prop), cr));
}
static int
zfs_secpolicy_set_fsacl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
/*
* permission to set permissions will be evaluated later in
* dsl_deleg_can_allow()
*/
(void) innvl;
return (zfs_dozonecheck(zc->zc_name, cr));
}
static int
zfs_secpolicy_rollback(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_ROLLBACK, cr));
}
static int
zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
dsl_pool_t *dp;
dsl_dataset_t *ds;
const char *cp;
int error;
/*
* Generate the current snapshot name from the given objsetid, then
* use that name for the secpolicy/zone checks.
*/
cp = strchr(zc->zc_name, '@');
if (cp == NULL)
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
dsl_dataset_name(ds, zc->zc_name);
error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
ZFS_DELEG_PERM_SEND, cr);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
static int
zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_SEND, cr));
}
static int
zfs_secpolicy_share(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc, (void) innvl, (void) cr;
return (SET_ERROR(ENOTSUP));
}
static int
zfs_secpolicy_smb_acl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc, (void) innvl, (void) cr;
return (SET_ERROR(ENOTSUP));
}
static int
zfs_get_parent(const char *datasetname, char *parent, int parentsize)
{
char *cp;
/*
* Remove the @bla or /bla from the end of the name to get the parent.
*/
(void) strncpy(parent, datasetname, parentsize);
cp = strrchr(parent, '@');
if (cp != NULL) {
cp[0] = '\0';
} else {
cp = strrchr(parent, '/');
if (cp == NULL)
return (SET_ERROR(ENOENT));
cp[0] = '\0';
}
return (0);
}
int
zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
{
int error;
if ((error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr));
}
static int
zfs_secpolicy_destroy(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
return (zfs_secpolicy_destroy_perms(zc->zc_name, cr));
}
/*
* Destroying snapshots with delegated permissions requires
* descendant mount and destroy permissions.
*/
static int
zfs_secpolicy_destroy_snaps(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc;
nvlist_t *snaps;
nvpair_t *pair, *nextpair;
int error = 0;
snaps = fnvlist_lookup_nvlist(innvl, "snaps");
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nextpair) {
nextpair = nvlist_next_nvpair(snaps, pair);
error = zfs_secpolicy_destroy_perms(nvpair_name(pair), cr);
if (error == ENOENT) {
/*
* Ignore any snapshots that don't exist (we consider
* them "already destroyed"). Remove the name from the
* nvl here in case the snapshot is created between
* now and when we try to destroy it (in which case
* we don't want to destroy it since we haven't
* checked for permission).
*/
fnvlist_remove_nvpair(snaps, pair);
error = 0;
}
if (error != 0)
break;
}
return (error);
}
int
zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
{
char parentname[ZFS_MAX_DATASET_NAME_LEN];
int error;
if ((error = zfs_secpolicy_write_perms(from,
ZFS_DELEG_PERM_RENAME, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(from,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
if ((error = zfs_get_parent(to, parentname,
sizeof (parentname))) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_CREATE, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
return (error);
}
static int
zfs_secpolicy_rename(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
return (zfs_secpolicy_rename_perms(zc->zc_name, zc->zc_value, cr));
}
static int
zfs_secpolicy_promote(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
dsl_pool_t *dp;
dsl_dataset_t *clone;
int error;
error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_PROMOTE, cr);
if (error != 0)
return (error);
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &clone);
if (error == 0) {
char parentname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_t *origin = NULL;
dsl_dir_t *dd;
dd = clone->ds_dir;
error = dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin);
if (error != 0) {
dsl_dataset_rele(clone, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
error = zfs_secpolicy_write_perms_ds(zc->zc_name, clone,
ZFS_DELEG_PERM_MOUNT, cr);
dsl_dataset_name(origin, parentname);
if (error == 0) {
error = zfs_secpolicy_write_perms_ds(parentname, origin,
ZFS_DELEG_PERM_PROMOTE, cr);
}
dsl_dataset_rele(clone, FTAG);
dsl_dataset_rele(origin, FTAG);
}
dsl_pool_rele(dp, FTAG);
return (error);
}
static int
zfs_secpolicy_recv(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
int error;
if ((error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_RECEIVE, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_CREATE, cr));
}
int
zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
{
return (zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_SNAPSHOT, cr));
}
/*
* Check for permission to create each snapshot in the nvlist.
*/
static int
zfs_secpolicy_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc;
nvlist_t *snaps;
int error = 0;
nvpair_t *pair;
snaps = fnvlist_lookup_nvlist(innvl, "snaps");
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
char *name = nvpair_name(pair);
char *atp = strchr(name, '@');
if (atp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
*atp = '\0';
error = zfs_secpolicy_snapshot_perms(name, cr);
*atp = '@';
if (error != 0)
break;
}
return (error);
}
/*
* Check for permission to create each bookmark in the nvlist.
*/
static int
zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc;
int error = 0;
for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
char *name = nvpair_name(pair);
char *hashp = strchr(name, '#');
if (hashp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
*hashp = '\0';
error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_BOOKMARK, cr);
*hashp = '#';
if (error != 0)
break;
}
return (error);
}
static int
zfs_secpolicy_destroy_bookmarks(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc;
nvpair_t *pair, *nextpair;
int error = 0;
for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL;
pair = nextpair) {
char *name = nvpair_name(pair);
char *hashp = strchr(name, '#');
nextpair = nvlist_next_nvpair(innvl, pair);
if (hashp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
*hashp = '\0';
error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_DESTROY, cr);
*hashp = '#';
if (error == ENOENT) {
/*
* Ignore any filesystems that don't exist (we consider
* their bookmarks "already destroyed"). Remove
* the name from the nvl here in case the filesystem
* is created between now and when we try to destroy
* the bookmark (in which case we don't want to
* destroy it since we haven't checked for permission).
*/
fnvlist_remove_nvpair(innvl, pair);
error = 0;
}
if (error != 0)
break;
}
return (error);
}
static int
zfs_secpolicy_log_history(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc, (void) innvl, (void) cr;
/*
* Even root must have a proper TSD so that we know what pool
* to log to.
*/
if (tsd_get(zfs_allow_log_key) == NULL)
return (SET_ERROR(EPERM));
return (0);
}
static int
zfs_secpolicy_create_clone(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
char parentname[ZFS_MAX_DATASET_NAME_LEN];
int error;
char *origin;
if ((error = zfs_get_parent(zc->zc_name, parentname,
sizeof (parentname))) != 0)
return (error);
if (nvlist_lookup_string(innvl, "origin", &origin) == 0 &&
(error = zfs_secpolicy_write_perms(origin,
ZFS_DELEG_PERM_CLONE, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_CREATE, cr)) != 0)
return (error);
return (zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_MOUNT, cr));
}
/*
* Policy for pool operations - create/destroy pools, add vdevs, etc. Requires
* SYS_CONFIG privilege, which is not available in a local zone.
*/
int
zfs_secpolicy_config(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc, (void) innvl;
if (secpolicy_sys_config(cr, B_FALSE) != 0)
return (SET_ERROR(EPERM));
return (0);
}
/*
* Policy for object to name lookups.
*/
static int
zfs_secpolicy_diff(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
int error;
if ((error = secpolicy_sys_config(cr, B_FALSE)) == 0)
return (0);
error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr);
return (error);
}
/*
* Policy for fault injection. Requires all privileges.
*/
static int
zfs_secpolicy_inject(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc, (void) innvl;
return (secpolicy_zinject(cr));
}
static int
zfs_secpolicy_inherit_prop(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
zfs_prop_t prop = zfs_name_to_prop(zc->zc_value);
if (prop == ZPROP_USERPROP) {
if (!zfs_prop_user(zc->zc_value))
return (SET_ERROR(EINVAL));
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_USERPROP, cr));
} else {
return (zfs_secpolicy_setprop(zc->zc_name, prop,
NULL, cr));
}
}
static int
zfs_secpolicy_userspace_one(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int err = zfs_secpolicy_read(zc, innvl, cr);
if (err)
return (err);
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
if (zc->zc_value[0] == 0) {
/*
* They are asking about a POSIX uid/gid. If it is
* their own, allow it.
*/
if (zc->zc_objset_type == ZFS_PROP_USERUSED ||
zc->zc_objset_type == ZFS_PROP_USERQUOTA ||
zc->zc_objset_type == ZFS_PROP_USEROBJUSED ||
zc->zc_objset_type == ZFS_PROP_USEROBJQUOTA) {
if (zc->zc_guid == crgetuid(cr))
return (0);
} else if (zc->zc_objset_type == ZFS_PROP_GROUPUSED ||
zc->zc_objset_type == ZFS_PROP_GROUPQUOTA ||
zc->zc_objset_type == ZFS_PROP_GROUPOBJUSED ||
zc->zc_objset_type == ZFS_PROP_GROUPOBJQUOTA) {
if (groupmember(zc->zc_guid, cr))
return (0);
}
/* else is for project quota/used */
}
return (zfs_secpolicy_write_perms(zc->zc_name,
userquota_perms[zc->zc_objset_type], cr));
}
static int
zfs_secpolicy_userspace_many(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int err = zfs_secpolicy_read(zc, innvl, cr);
if (err)
return (err);
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
return (zfs_secpolicy_write_perms(zc->zc_name,
userquota_perms[zc->zc_objset_type], cr));
}
static int
zfs_secpolicy_userspace_upgrade(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) innvl;
return (zfs_secpolicy_setprop(zc->zc_name, ZFS_PROP_VERSION,
NULL, cr));
}
static int
zfs_secpolicy_hold(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc;
nvpair_t *pair;
nvlist_t *holds;
int error;
holds = fnvlist_lookup_nvlist(innvl, "holds");
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
error = dmu_fsname(nvpair_name(pair), fsname);
if (error != 0)
return (error);
error = zfs_secpolicy_write_perms(fsname,
ZFS_DELEG_PERM_HOLD, cr);
if (error != 0)
return (error);
}
return (0);
}
static int
zfs_secpolicy_release(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
(void) zc;
nvpair_t *pair;
int error;
for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL;
pair = nvlist_next_nvpair(innvl, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
error = dmu_fsname(nvpair_name(pair), fsname);
if (error != 0)
return (error);
error = zfs_secpolicy_write_perms(fsname,
ZFS_DELEG_PERM_RELEASE, cr);
if (error != 0)
return (error);
}
return (0);
}
/*
* Policy for allowing temporary snapshots to be taken or released
*/
static int
zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
/*
* A temporary snapshot is the same as a snapshot,
* hold, destroy and release all rolled into one.
* Delegated diff alone is sufficient that we allow this.
*/
int error;
if ((error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_DIFF, cr)) == 0)
return (0);
error = zfs_secpolicy_snapshot_perms(zc->zc_name, cr);
if (innvl != NULL) {
if (error == 0)
error = zfs_secpolicy_hold(zc, innvl, cr);
if (error == 0)
error = zfs_secpolicy_release(zc, innvl, cr);
if (error == 0)
error = zfs_secpolicy_destroy(zc, innvl, cr);
}
return (error);
}
static int
zfs_secpolicy_load_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_LOAD_KEY, cr));
}
static int
zfs_secpolicy_change_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_CHANGE_KEY, cr));
}
/*
* Returns the nvlist as specified by the user in the zfs_cmd_t.
*/
static int
get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp)
{
char *packed;
int error;
nvlist_t *list = NULL;
/*
* Read in and unpack the user-supplied nvlist.
*/
if (size == 0)
return (SET_ERROR(EINVAL));
packed = vmem_alloc(size, KM_SLEEP);
if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size,
iflag)) != 0) {
vmem_free(packed, size);
return (SET_ERROR(EFAULT));
}
if ((error = nvlist_unpack(packed, size, &list, 0)) != 0) {
vmem_free(packed, size);
return (error);
}
vmem_free(packed, size);
*nvp = list;
return (0);
}
/*
* Reduce the size of this nvlist until it can be serialized in 'max' bytes.
* Entries will be removed from the end of the nvlist, and one int32 entry
* named "N_MORE_ERRORS" will be added indicating how many entries were
* removed.
*/
static int
nvlist_smush(nvlist_t *errors, size_t max)
{
size_t size;
size = fnvlist_size(errors);
if (size > max) {
nvpair_t *more_errors;
int n = 0;
if (max < 1024)
return (SET_ERROR(ENOMEM));
fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, 0);
more_errors = nvlist_prev_nvpair(errors, NULL);
do {
nvpair_t *pair = nvlist_prev_nvpair(errors,
more_errors);
fnvlist_remove_nvpair(errors, pair);
n++;
size = fnvlist_size(errors);
} while (size > max);
fnvlist_remove_nvpair(errors, more_errors);
fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, n);
ASSERT3U(fnvlist_size(errors), <=, max);
}
return (0);
}
static int
put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl)
{
char *packed = NULL;
int error = 0;
size_t size;
size = fnvlist_size(nvl);
if (size > zc->zc_nvlist_dst_size) {
error = SET_ERROR(ENOMEM);
} else {
packed = fnvlist_pack(nvl, &size);
if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst,
size, zc->zc_iflags) != 0)
error = SET_ERROR(EFAULT);
fnvlist_pack_free(packed, size);
}
zc->zc_nvlist_dst_size = size;
zc->zc_nvlist_dst_filled = B_TRUE;
return (error);
}
int
getzfsvfs_impl(objset_t *os, zfsvfs_t **zfvp)
{
int error = 0;
if (dmu_objset_type(os) != DMU_OST_ZFS) {
return (SET_ERROR(EINVAL));
}
mutex_enter(&os->os_user_ptr_lock);
*zfvp = dmu_objset_get_user(os);
/* bump s_active only when non-zero to prevent umount race */
error = zfs_vfs_ref(zfvp);
mutex_exit(&os->os_user_ptr_lock);
return (error);
}
int
getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
{
objset_t *os;
int error;
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = getzfsvfs_impl(os, zfvp);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* Find a zfsvfs_t for a mounted filesystem, or create our own, in which
* case its z_sb will be NULL, and it will be opened as the owner.
* If 'writer' is set, the z_teardown_lock will be held for RW_WRITER,
* which prevents all inode ops from running.
*/
static int
zfsvfs_hold(const char *name, const void *tag, zfsvfs_t **zfvp,
boolean_t writer)
{
int error = 0;
if (getzfsvfs(name, zfvp) != 0)
error = zfsvfs_create(name, B_FALSE, zfvp);
if (error == 0) {
if (writer)
ZFS_TEARDOWN_ENTER_WRITE(*zfvp, tag);
else
ZFS_TEARDOWN_ENTER_READ(*zfvp, tag);
if ((*zfvp)->z_unmounted) {
/*
* XXX we could probably try again, since the unmounting
* thread should be just about to disassociate the
* objset from the zfsvfs.
*/
ZFS_TEARDOWN_EXIT(*zfvp, tag);
return (SET_ERROR(EBUSY));
}
}
return (error);
}
static void
zfsvfs_rele(zfsvfs_t *zfsvfs, const void *tag)
{
ZFS_TEARDOWN_EXIT(zfsvfs, tag);
if (zfs_vfs_held(zfsvfs)) {
zfs_vfs_rele(zfsvfs);
} else {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
}
static int
zfs_ioc_pool_create(zfs_cmd_t *zc)
{
int error;
nvlist_t *config, *props = NULL;
nvlist_t *rootprops = NULL;
nvlist_t *zplprops = NULL;
dsl_crypto_params_t *dcp = NULL;
const char *spa_name = zc->zc_name;
boolean_t unload_wkey = B_TRUE;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config)))
return (error);
if (zc->zc_nvlist_src_size != 0 && (error =
get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props))) {
nvlist_free(config);
return (error);
}
if (props) {
nvlist_t *nvl = NULL;
nvlist_t *hidden_args = NULL;
uint64_t version = SPA_VERSION;
char *tname;
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
error = SET_ERROR(EINVAL);
goto pool_props_bad;
}
(void) nvlist_lookup_nvlist(props, ZPOOL_ROOTFS_PROPS, &nvl);
if (nvl) {
error = nvlist_dup(nvl, &rootprops, KM_SLEEP);
if (error != 0)
goto pool_props_bad;
(void) nvlist_remove_all(props, ZPOOL_ROOTFS_PROPS);
}
(void) nvlist_lookup_nvlist(props, ZPOOL_HIDDEN_ARGS,
&hidden_args);
error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
rootprops, hidden_args, &dcp);
if (error != 0)
goto pool_props_bad;
(void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS);
VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
error = zfs_fill_zplprops_root(version, rootprops,
zplprops, NULL);
if (error != 0)
goto pool_props_bad;
if (nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_TNAME), &tname) == 0)
spa_name = tname;
}
error = spa_create(zc->zc_name, config, props, zplprops, dcp);
/*
* Set the remaining root properties
*/
if (!error && (error = zfs_set_prop_nvlist(spa_name,
ZPROP_SRC_LOCAL, rootprops, NULL)) != 0) {
(void) spa_destroy(spa_name);
unload_wkey = B_FALSE; /* spa_destroy() unloads wrapping keys */
}
pool_props_bad:
nvlist_free(rootprops);
nvlist_free(zplprops);
nvlist_free(config);
nvlist_free(props);
dsl_crypto_params_free(dcp, unload_wkey && !!error);
return (error);
}
static int
zfs_ioc_pool_destroy(zfs_cmd_t *zc)
{
int error;
zfs_log_history(zc);
error = spa_destroy(zc->zc_name);
return (error);
}
static int
zfs_ioc_pool_import(zfs_cmd_t *zc)
{
nvlist_t *config, *props = NULL;
uint64_t guid;
int error;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config)) != 0)
return (error);
if (zc->zc_nvlist_src_size != 0 && (error =
get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props))) {
nvlist_free(config);
return (error);
}
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
guid != zc->zc_guid)
error = SET_ERROR(EINVAL);
else
error = spa_import(zc->zc_name, config, props, zc->zc_cookie);
if (zc->zc_nvlist_dst != 0) {
int err;
if ((err = put_nvlist(zc, config)) != 0)
error = err;
}
nvlist_free(config);
nvlist_free(props);
return (error);
}
static int
zfs_ioc_pool_export(zfs_cmd_t *zc)
{
int error;
boolean_t force = (boolean_t)zc->zc_cookie;
boolean_t hardforce = (boolean_t)zc->zc_guid;
zfs_log_history(zc);
error = spa_export(zc->zc_name, NULL, force, hardforce);
return (error);
}
static int
zfs_ioc_pool_configs(zfs_cmd_t *zc)
{
nvlist_t *configs;
int error;
if ((configs = spa_all_configs(&zc->zc_cookie)) == NULL)
return (SET_ERROR(EEXIST));
error = put_nvlist(zc, configs);
nvlist_free(configs);
return (error);
}
/*
* inputs:
* zc_name name of the pool
*
* outputs:
* zc_cookie real errno
* zc_nvlist_dst config nvlist
* zc_nvlist_dst_size size of config nvlist
*/
static int
zfs_ioc_pool_stats(zfs_cmd_t *zc)
{
nvlist_t *config;
int error;
int ret = 0;
error = spa_get_stats(zc->zc_name, &config, zc->zc_value,
sizeof (zc->zc_value));
if (config != NULL) {
ret = put_nvlist(zc, config);
nvlist_free(config);
/*
* The config may be present even if 'error' is non-zero.
* In this case we return success, and preserve the real errno
* in 'zc_cookie'.
*/
zc->zc_cookie = error;
} else {
ret = error;
}
return (ret);
}
/*
* Try to import the given pool, returning pool stats as appropriate so that
* userland knows which devices are available and the overall pool health.
*/
static int
zfs_ioc_pool_tryimport(zfs_cmd_t *zc)
{
nvlist_t *tryconfig, *config = NULL;
int error;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &tryconfig)) != 0)
return (error);
config = spa_tryimport(tryconfig);
nvlist_free(tryconfig);
if (config == NULL)
return (SET_ERROR(EINVAL));
error = put_nvlist(zc, config);
nvlist_free(config);
return (error);
}
/*
* inputs:
* zc_name name of the pool
* zc_cookie scan func (pool_scan_func_t)
* zc_flags scrub pause/resume flag (pool_scrub_cmd_t)
*/
static int
zfs_ioc_pool_scan(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
if (zc->zc_flags >= POOL_SCRUB_FLAGS_END)
return (SET_ERROR(EINVAL));
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if (zc->zc_flags == POOL_SCRUB_PAUSE)
error = spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE);
else if (zc->zc_cookie == POOL_SCAN_NONE)
error = spa_scan_stop(spa);
else
error = spa_scan(spa, zc->zc_cookie);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_pool_freeze(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error == 0) {
spa_freeze(spa);
spa_close(spa, FTAG);
}
return (error);
}
static int
zfs_ioc_pool_upgrade(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if (zc->zc_cookie < spa_version(spa) ||
!SPA_VERSION_IS_SUPPORTED(zc->zc_cookie)) {
spa_close(spa, FTAG);
return (SET_ERROR(EINVAL));
}
spa_upgrade(spa, zc->zc_cookie);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_pool_get_history(zfs_cmd_t *zc)
{
spa_t *spa;
char *hist_buf;
uint64_t size;
int error;
if ((size = zc->zc_history_len) == 0)
return (SET_ERROR(EINVAL));
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
hist_buf = vmem_alloc(size, KM_SLEEP);
if ((error = spa_history_get(spa, &zc->zc_history_offset,
&zc->zc_history_len, hist_buf)) == 0) {
error = ddi_copyout(hist_buf,
(void *)(uintptr_t)zc->zc_history,
zc->zc_history_len, zc->zc_iflags);
}
spa_close(spa, FTAG);
vmem_free(hist_buf, size);
return (error);
}
static int
zfs_ioc_pool_reguid(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error == 0) {
error = spa_change_guid(spa);
spa_close(spa, FTAG);
}
return (error);
}
static int
zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc)
{
return (dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value));
}
/*
* inputs:
* zc_name name of filesystem
* zc_obj object to find
*
* outputs:
* zc_value name of object
*/
static int
zfs_ioc_obj_to_path(zfs_cmd_t *zc)
{
objset_t *os;
int error;
/* XXX reading from objset not owned */
if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE,
FTAG, &os)) != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (SET_ERROR(EINVAL));
}
error = zfs_obj_to_path(os, zc->zc_obj, zc->zc_value,
sizeof (zc->zc_value));
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_obj object to find
*
* outputs:
* zc_stat stats on object
* zc_value path to object
*/
static int
zfs_ioc_obj_to_stats(zfs_cmd_t *zc)
{
objset_t *os;
int error;
/* XXX reading from objset not owned */
if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE,
FTAG, &os)) != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (SET_ERROR(EINVAL));
}
error = zfs_obj_to_stats(os, zc->zc_obj, &zc->zc_stat, zc->zc_value,
sizeof (zc->zc_value));
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (error);
}
static int
zfs_ioc_vdev_add(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
nvlist_t *config;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config);
if (error == 0) {
error = spa_vdev_add(spa, config);
nvlist_free(config);
}
spa_close(spa, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of the pool
* zc_guid guid of vdev to remove
* zc_cookie cancel removal
*/
static int
zfs_ioc_vdev_remove(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
if (zc->zc_cookie != 0) {
error = spa_vdev_remove_cancel(spa);
} else {
error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE);
}
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_vdev_set_state(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
switch (zc->zc_cookie) {
case VDEV_STATE_ONLINE:
error = vdev_online(spa, zc->zc_guid, zc->zc_obj, &newstate);
break;
case VDEV_STATE_OFFLINE:
error = vdev_offline(spa, zc->zc_guid, zc->zc_obj);
break;
case VDEV_STATE_FAULTED:
if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED &&
zc->zc_obj != VDEV_AUX_EXTERNAL &&
zc->zc_obj != VDEV_AUX_EXTERNAL_PERSIST)
zc->zc_obj = VDEV_AUX_ERR_EXCEEDED;
error = vdev_fault(spa, zc->zc_guid, zc->zc_obj);
break;
case VDEV_STATE_DEGRADED:
if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED &&
zc->zc_obj != VDEV_AUX_EXTERNAL)
zc->zc_obj = VDEV_AUX_ERR_EXCEEDED;
error = vdev_degrade(spa, zc->zc_guid, zc->zc_obj);
break;
default:
error = SET_ERROR(EINVAL);
}
zc->zc_cookie = newstate;
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_vdev_attach(zfs_cmd_t *zc)
{
spa_t *spa;
nvlist_t *config;
int replacing = zc->zc_cookie;
int rebuild = zc->zc_simple;
int error;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config)) == 0) {
error = spa_vdev_attach(spa, zc->zc_guid, config, replacing,
rebuild);
nvlist_free(config);
}
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_vdev_detach(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
error = spa_vdev_detach(spa, zc->zc_guid, 0, B_FALSE);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_vdev_split(zfs_cmd_t *zc)
{
spa_t *spa;
nvlist_t *config, *props = NULL;
int error;
boolean_t exp = !!(zc->zc_cookie & ZPOOL_EXPORT_AFTER_SPLIT);
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config))) {
spa_close(spa, FTAG);
return (error);
}
if (zc->zc_nvlist_src_size != 0 && (error =
get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props))) {
spa_close(spa, FTAG);
nvlist_free(config);
return (error);
}
error = spa_vdev_split_mirror(spa, zc->zc_string, config, props, exp);
spa_close(spa, FTAG);
nvlist_free(config);
nvlist_free(props);
return (error);
}
static int
zfs_ioc_vdev_setpath(zfs_cmd_t *zc)
{
spa_t *spa;
const char *path = zc->zc_value;
uint64_t guid = zc->zc_guid;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
error = spa_vdev_setpath(spa, guid, path);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_vdev_setfru(zfs_cmd_t *zc)
{
spa_t *spa;
const char *fru = zc->zc_value;
uint64_t guid = zc->zc_guid;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
error = spa_vdev_setfru(spa, guid, fru);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os)
{
int error = 0;
nvlist_t *nv;
dmu_objset_fast_stat(os, &zc->zc_objset_stats);
if (zc->zc_nvlist_dst != 0 &&
(error = dsl_prop_get_all(os, &nv)) == 0) {
dmu_objset_stats(os, nv);
/*
* NB: zvol_get_stats() will read the objset contents,
* which we aren't supposed to do with a
* DS_MODE_USER hold, because it could be
* inconsistent. So this is a bit of a workaround...
* XXX reading without owning
*/
if (!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZVOL) {
error = zvol_get_stats(os, nv);
if (error == EIO) {
nvlist_free(nv);
return (error);
}
VERIFY0(error);
}
if (error == 0)
error = put_nvlist(zc, nv);
nvlist_free(nv);
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_objset_stats stats
* zc_nvlist_dst property nvlist
* zc_nvlist_dst_size size of property nvlist
*/
static int
zfs_ioc_objset_stats(zfs_cmd_t *zc)
{
objset_t *os;
int error;
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (error == 0) {
error = zfs_ioc_objset_stats_impl(zc, os);
dmu_objset_rele(os, FTAG);
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_nvlist_dst received property nvlist
* zc_nvlist_dst_size size of received property nvlist
*
* Gets received properties (distinct from local properties on or after
* SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from
* local property values.
*/
static int
zfs_ioc_objset_recvd_props(zfs_cmd_t *zc)
{
int error = 0;
nvlist_t *nv;
/*
* Without this check, we would return local property values if the
* caller has not already received properties on or after
* SPA_VERSION_RECVD_PROPS.
*/
if (!dsl_prop_get_hasrecvd(zc->zc_name))
return (SET_ERROR(ENOTSUP));
if (zc->zc_nvlist_dst != 0 &&
(error = dsl_prop_get_received(zc->zc_name, &nv)) == 0) {
error = put_nvlist(zc, nv);
nvlist_free(nv);
}
return (error);
}
static int
nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop)
{
uint64_t value;
int error;
/*
* zfs_get_zplprop() will either find a value or give us
* the default value (if there is one).
*/
if ((error = zfs_get_zplprop(os, prop, &value)) != 0)
return (error);
VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0);
return (0);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_dst_size size of buffer for zpl property nvlist
*
* outputs:
* zc_nvlist_dst zpl property nvlist
* zc_nvlist_dst_size size of zpl property nvlist
*/
static int
zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
{
objset_t *os;
int err;
/* XXX reading without owning */
if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os)))
return (err);
dmu_objset_fast_stat(os, &zc->zc_objset_stats);
/*
* NB: nvl_add_zplprop() will read the objset contents,
* which we aren't supposed to do with a DS_MODE_USER
* hold, because it could be inconsistent.
*/
if (zc->zc_nvlist_dst != 0 &&
!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_CASE)) == 0)
err = put_nvlist(zc, nv);
nvlist_free(nv);
} else {
err = SET_ERROR(ENOENT);
}
dmu_objset_rele(os, FTAG);
return (err);
}
/*
* inputs:
* zc_name name of filesystem
* zc_cookie zap cursor
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_name name of next filesystem
* zc_cookie zap cursor
* zc_objset_stats stats
* zc_nvlist_dst property nvlist
* zc_nvlist_dst_size size of property nvlist
*/
static int
zfs_ioc_dataset_list_next(zfs_cmd_t *zc)
{
objset_t *os;
int error;
char *p;
size_t orig_len = strlen(zc->zc_name);
top:
if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) {
if (error == ENOENT)
error = SET_ERROR(ESRCH);
return (error);
}
p = strrchr(zc->zc_name, '/');
if (p == NULL || p[1] != '\0')
(void) strlcat(zc->zc_name, "/", sizeof (zc->zc_name));
p = zc->zc_name + strlen(zc->zc_name);
do {
error = dmu_dir_list_next(os,
sizeof (zc->zc_name) - (p - zc->zc_name), p,
NULL, &zc->zc_cookie);
if (error == ENOENT)
error = SET_ERROR(ESRCH);
} while (error == 0 && zfs_dataset_name_hidden(zc->zc_name));
dmu_objset_rele(os, FTAG);
/*
* If it's an internal dataset (i.e. one with a '$' in its name),
* don't try to get stats for it; otherwise we'll return ENOENT.
*/
if (error == 0 && strchr(zc->zc_name, '$') == NULL) {
error = zfs_ioc_objset_stats(zc); /* fill in the stats */
if (error == ENOENT) {
/* We lost a race with destroy, get the next one. */
zc->zc_name[orig_len] = '\0';
goto top;
}
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_cookie zap cursor
* zc_nvlist_src iteration range nvlist
* zc_nvlist_src_size size of iteration range nvlist
*
* outputs:
* zc_name name of next snapshot
* zc_objset_stats stats
* zc_nvlist_dst property nvlist
* zc_nvlist_dst_size size of property nvlist
*/
static int
zfs_ioc_snapshot_list_next(zfs_cmd_t *zc)
{
int error;
objset_t *os, *ossnap;
dsl_dataset_t *ds;
uint64_t min_txg = 0, max_txg = 0;
if (zc->zc_nvlist_src_size != 0) {
nvlist_t *props = NULL;
error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props);
if (error != 0)
return (error);
(void) nvlist_lookup_uint64(props, SNAP_ITER_MIN_TXG,
&min_txg);
(void) nvlist_lookup_uint64(props, SNAP_ITER_MAX_TXG,
&max_txg);
nvlist_free(props);
}
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (error != 0) {
return (error == ENOENT ? SET_ERROR(ESRCH) : error);
}
/*
* A dataset name of maximum length cannot have any snapshots,
* so exit immediately.
*/
if (strlcat(zc->zc_name, "@", sizeof (zc->zc_name)) >=
ZFS_MAX_DATASET_NAME_LEN) {
dmu_objset_rele(os, FTAG);
return (SET_ERROR(ESRCH));
}
while (error == 0) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
error = SET_ERROR(EINTR);
break;
}
error = dmu_snapshot_list_next(os,
sizeof (zc->zc_name) - strlen(zc->zc_name),
zc->zc_name + strlen(zc->zc_name), &zc->zc_obj,
&zc->zc_cookie, NULL);
if (error == ENOENT) {
error = SET_ERROR(ESRCH);
break;
} else if (error != 0) {
break;
}
error = dsl_dataset_hold_obj(dmu_objset_pool(os), zc->zc_obj,
FTAG, &ds);
if (error != 0)
break;
if ((min_txg != 0 && dsl_get_creationtxg(ds) < min_txg) ||
(max_txg != 0 && dsl_get_creationtxg(ds) > max_txg)) {
dsl_dataset_rele(ds, FTAG);
/* undo snapshot name append */
*(strchr(zc->zc_name, '@') + 1) = '\0';
/* skip snapshot */
continue;
}
if (zc->zc_simple) {
zc->zc_objset_stats.dds_creation_txg =
dsl_get_creationtxg(ds);
dsl_dataset_rele(ds, FTAG);
break;
}
if ((error = dmu_objset_from_ds(ds, &ossnap)) != 0) {
dsl_dataset_rele(ds, FTAG);
break;
}
if ((error = zfs_ioc_objset_stats_impl(zc, ossnap)) != 0) {
dsl_dataset_rele(ds, FTAG);
break;
}
dsl_dataset_rele(ds, FTAG);
break;
}
dmu_objset_rele(os, FTAG);
/* if we failed, undo the @ that we tacked on to zc_name */
if (error != 0)
*strchr(zc->zc_name, '@') = '\0';
return (error);
}
static int
zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
{
const char *propname = nvpair_name(pair);
uint64_t *valary;
unsigned int vallen;
const char *dash, *domain;
zfs_userquota_prop_t type;
uint64_t rid;
uint64_t quota;
zfsvfs_t *zfsvfs;
int err;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) != 0)
return (SET_ERROR(EINVAL));
}
/*
* A correctly constructed propname is encoded as
* userquota@<rid>-<domain>.
*/
if ((dash = strchr(propname, '-')) == NULL ||
nvpair_value_uint64_array(pair, &valary, &vallen) != 0 ||
vallen != 3)
return (SET_ERROR(EINVAL));
domain = dash + 1;
type = valary[0];
rid = valary[1];
quota = valary[2];
err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE);
if (err == 0) {
err = zfs_set_userquota(zfsvfs, type, domain, rid, quota);
zfsvfs_rele(zfsvfs, FTAG);
}
return (err);
}
/*
* If the named property is one that has a special function to set its value,
* return 0 on success and a positive error code on failure; otherwise if it is
* not one of the special properties handled by this function, return -1.
*
* XXX: It would be better for callers of the property interface if we handled
* these special cases in dsl_prop.c (in the dsl layer).
*/
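/*
* Illustrative caller sketch (not part of this ioctl path): a consumer of the
* convention above falls back to the generic dsl_prop interface when -1 is
* returned, e.g.
*
*	err = zfs_prop_set_special(dsname, source, pair);
*	if (err == -1)
*		err = dsl_prop_set_int(dsname, nvpair_name(pair), source,
*		    fnvpair_value_uint64(pair));
*
* zfs_set_prop_nvlist() below implements this pattern, batching the generic
* properties through a single dsl_props_set() call instead of setting them
* one at a time.
*/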
static int
zfs_prop_set_special(const char *dsname, zprop_source_t source,
nvpair_t *pair)
{
const char *propname = nvpair_name(pair);
zfs_prop_t prop = zfs_name_to_prop(propname);
uint64_t intval = 0;
const char *strval = NULL;
int err = -1;
if (prop == ZPROP_USERPROP) {
if (zfs_prop_userquota(propname))
return (zfs_prop_set_userquota(dsname, pair));
return (-1);
}
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) == 0);
}
/* all special properties are numeric except for keylocation */
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
strval = fnvpair_value_string(pair);
} else {
intval = fnvpair_value_uint64(pair);
}
switch (prop) {
case ZFS_PROP_QUOTA:
err = dsl_dir_set_quota(dsname, source, intval);
break;
case ZFS_PROP_REFQUOTA:
err = dsl_dataset_set_refquota(dsname, source, intval);
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
if (intval == UINT64_MAX) {
/* clearing the limit, just do it */
err = 0;
} else {
err = dsl_dir_activate_fs_ss_limit(dsname);
}
/*
* Set err to -1 to force the zfs_set_prop_nvlist code down the
* default path to set the value in the nvlist.
*/
if (err == 0)
err = -1;
break;
case ZFS_PROP_KEYLOCATION:
err = dsl_crypto_can_set_keylocation(dsname, strval);
/*
* Set err to -1 to force the zfs_set_prop_nvlist code down the
* default path to set the value in the nvlist.
*/
if (err == 0)
err = -1;
break;
case ZFS_PROP_RESERVATION:
err = dsl_dir_set_reservation(dsname, source, intval);
break;
case ZFS_PROP_REFRESERVATION:
err = dsl_dataset_set_refreservation(dsname, source, intval);
break;
case ZFS_PROP_COMPRESSION:
err = dsl_dataset_set_compression(dsname, source, intval);
/*
* Set err to -1 to force the zfs_set_prop_nvlist code down the
* default path to set the value in the nvlist.
*/
if (err == 0)
err = -1;
break;
case ZFS_PROP_VOLSIZE:
err = zvol_set_volsize(dsname, intval);
break;
case ZFS_PROP_SNAPDEV:
err = zvol_set_snapdev(dsname, source, intval);
break;
case ZFS_PROP_VOLMODE:
err = zvol_set_volmode(dsname, source, intval);
break;
case ZFS_PROP_VERSION:
{
zfsvfs_t *zfsvfs;
if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
break;
err = zfs_set_version(zfsvfs, intval);
zfsvfs_rele(zfsvfs, FTAG);
if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {
zfs_cmd_t *zc;
zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strlcpy(zc->zc_name, dsname,
sizeof (zc->zc_name));
(void) zfs_ioc_userspace_upgrade(zc);
(void) zfs_ioc_id_quota_upgrade(zc);
kmem_free(zc, sizeof (zfs_cmd_t));
}
break;
}
default:
err = -1;
}
return (err);
}
static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_SETUID:
case ZFS_PROP_READONLY:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
return (B_TRUE);
default:
return (B_FALSE);
}
}
/*
* This function is best effort. If it fails to set any of the given properties,
* it continues to set as many as it can and returns the last error
* encountered. If the caller provides a non-NULL errlist, it will be filled in
* with the list of names of all the properties that failed along with the
* corresponding error numbers.
*
* If every property is set successfully, zero is returned and errlist is not
* modified.
*/
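/*
* Usage sketch (illustrative only; "pool/fs" and the logging are hypothetical,
* the errlist contract comes from the comment above):
*
*	nvlist_t *errlist = fnvlist_alloc();
*	int err = zfs_set_prop_nvlist("pool/fs", ZPROP_SRC_LOCAL, nvl, errlist);
*	for (nvpair_t *p = nvlist_next_nvpair(errlist, NULL); p != NULL;
*	    p = nvlist_next_nvpair(errlist, p))
*		cmn_err(CE_NOTE, "failed to set %s", nvpair_name(p));
*	fnvlist_free(errlist);
*/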
int
zfs_set_prop_nvlist(const char *dsname, zprop_source_t source, nvlist_t *nvl,
nvlist_t *errlist)
{
nvpair_t *pair;
nvpair_t *propval;
int rv = 0;
int err;
uint64_t intval;
const char *strval;
boolean_t should_update_mount_cache = B_FALSE;
nvlist_t *genericnvl = fnvlist_alloc();
nvlist_t *retrynvl = fnvlist_alloc();
retry:
pair = NULL;
while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
const char *propname = nvpair_name(pair);
zfs_prop_t prop = zfs_name_to_prop(propname);
err = 0;
/* decode the property value */
propval = pair;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
attrs = fnvpair_value_nvlist(pair);
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&propval) != 0)
err = SET_ERROR(EINVAL);
}
/* Validate value type */
if (err == 0 && source == ZPROP_SRC_INHERITED) {
/* inherited properties are expected to be booleans */
if (nvpair_type(propval) != DATA_TYPE_BOOLEAN)
err = SET_ERROR(EINVAL);
} else if (err == 0 && prop == ZPROP_USERPROP) {
if (zfs_prop_user(propname)) {
if (nvpair_type(propval) != DATA_TYPE_STRING)
err = SET_ERROR(EINVAL);
} else if (zfs_prop_userquota(propname)) {
if (nvpair_type(propval) !=
DATA_TYPE_UINT64_ARRAY)
err = SET_ERROR(EINVAL);
} else {
err = SET_ERROR(EINVAL);
}
} else if (err == 0) {
if (nvpair_type(propval) == DATA_TYPE_STRING) {
if (zfs_prop_get_type(prop) != PROP_TYPE_STRING)
err = SET_ERROR(EINVAL);
} else if (nvpair_type(propval) == DATA_TYPE_UINT64) {
const char *unused;
intval = fnvpair_value_uint64(propval);
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
break;
case PROP_TYPE_STRING:
err = SET_ERROR(EINVAL);
break;
case PROP_TYPE_INDEX:
if (zfs_prop_index_to_string(prop,
intval, &unused) != 0)
err =
SET_ERROR(ZFS_ERR_BADPROP);
break;
default:
cmn_err(CE_PANIC,
"unknown property type");
}
} else {
err = SET_ERROR(EINVAL);
}
}
/* Validate permissions */
if (err == 0)
err = zfs_check_settable(dsname, pair, CRED());
if (err == 0) {
if (source == ZPROP_SRC_INHERITED)
err = -1; /* does not need special handling */
else
err = zfs_prop_set_special(dsname, source,
pair);
if (err == -1) {
/*
* For better performance we build up a list of
* properties to set in a single transaction.
*/
err = nvlist_add_nvpair(genericnvl, pair);
} else if (err != 0 && nvl != retrynvl) {
/*
* This may be a spurious error caused by
* receiving quota and reservation out of order.
* Try again in a second pass.
*/
err = nvlist_add_nvpair(retrynvl, pair);
}
}
if (err != 0) {
if (errlist != NULL)
fnvlist_add_int32(errlist, propname, err);
rv = err;
}
if (zfs_is_namespace_prop(prop))
should_update_mount_cache = B_TRUE;
}
if (nvl != retrynvl && !nvlist_empty(retrynvl)) {
nvl = retrynvl;
goto retry;
}
if (nvlist_empty(genericnvl))
goto out;
/*
* Try to set them all in one batch.
*/
err = dsl_props_set(dsname, source, genericnvl);
if (err == 0)
goto out;
/*
* If batching fails, we still want to set as many properties as we
* can, so try setting them individually.
*/
pair = NULL;
while ((pair = nvlist_next_nvpair(genericnvl, pair)) != NULL) {
const char *propname = nvpair_name(pair);
err = 0;
propval = pair;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
attrs = fnvpair_value_nvlist(pair);
propval = fnvlist_lookup_nvpair(attrs, ZPROP_VALUE);
}
if (nvpair_type(propval) == DATA_TYPE_STRING) {
strval = fnvpair_value_string(propval);
err = dsl_prop_set_string(dsname, propname,
source, strval);
} else if (nvpair_type(propval) == DATA_TYPE_BOOLEAN) {
err = dsl_prop_inherit(dsname, propname, source);
} else {
intval = fnvpair_value_uint64(propval);
err = dsl_prop_set_int(dsname, propname, source,
intval);
}
if (err != 0) {
if (errlist != NULL) {
fnvlist_add_int32(errlist, propname, err);
}
rv = err;
}
}
out:
if (should_update_mount_cache)
zfs_ioctl_update_mount_cache(dsname);
nvlist_free(genericnvl);
nvlist_free(retrynvl);
return (rv);
}
/*
* Check that all the properties are valid user properties.
*/
static int
zfs_check_userprops(nvlist_t *nvl)
{
nvpair_t *pair = NULL;
while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
const char *propname = nvpair_name(pair);
if (!zfs_prop_user(propname) ||
nvpair_type(pair) != DATA_TYPE_STRING)
return (SET_ERROR(EINVAL));
if (strlen(propname) >= ZAP_MAXNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
if (strlen(fnvpair_value_string(pair)) >= ZAP_MAXVALUELEN)
return (SET_ERROR(E2BIG));
}
return (0);
}
static void
props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops)
{
nvpair_t *pair;
VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
pair = NULL;
while ((pair = nvlist_next_nvpair(props, pair)) != NULL) {
if (nvlist_exists(skipped, nvpair_name(pair)))
continue;
VERIFY(nvlist_add_nvpair(*newprops, pair) == 0);
}
}
static int
clear_received_props(const char *dsname, nvlist_t *props,
nvlist_t *skipped)
{
int err = 0;
nvlist_t *cleared_props = NULL;
props_skip(props, skipped, &cleared_props);
if (!nvlist_empty(cleared_props)) {
/*
* Acts on local properties until the dataset has received
* properties at least once on or after SPA_VERSION_RECVD_PROPS.
*/
zprop_source_t flags = (ZPROP_SRC_NONE |
(dsl_prop_get_hasrecvd(dsname) ? ZPROP_SRC_RECEIVED : 0));
err = zfs_set_prop_nvlist(dsname, flags, cleared_props, NULL);
}
nvlist_free(cleared_props);
return (err);
}
/*
* inputs:
* zc_name name of filesystem
* zc_value name of property to set
* zc_nvlist_src{_size} nvlist of properties to apply
* zc_cookie received properties flag
*
* outputs:
* zc_nvlist_dst{_size} error for each unapplied received property
*/
static int
zfs_ioc_set_prop(zfs_cmd_t *zc)
{
nvlist_t *nvl;
boolean_t received = zc->zc_cookie;
zprop_source_t source = (received ? ZPROP_SRC_RECEIVED :
ZPROP_SRC_LOCAL);
nvlist_t *errors;
int error;
if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &nvl)) != 0)
return (error);
if (received) {
nvlist_t *origprops;
if (dsl_prop_get_received(zc->zc_name, &origprops) == 0) {
(void) clear_received_props(zc->zc_name,
origprops, nvl);
nvlist_free(origprops);
}
error = dsl_prop_set_hasrecvd(zc->zc_name);
}
errors = fnvlist_alloc();
if (error == 0)
error = zfs_set_prop_nvlist(zc->zc_name, source, nvl, errors);
if (zc->zc_nvlist_dst != 0 && errors != NULL) {
(void) put_nvlist(zc, errors);
}
nvlist_free(errors);
nvlist_free(nvl);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_value name of property to inherit
* zc_cookie revert to received value if TRUE
*
* outputs: none
*/
static int
zfs_ioc_inherit_prop(zfs_cmd_t *zc)
{
const char *propname = zc->zc_value;
zfs_prop_t prop = zfs_name_to_prop(propname);
boolean_t received = zc->zc_cookie;
zprop_source_t source = (received
? ZPROP_SRC_NONE /* revert to received value, if any */
: ZPROP_SRC_INHERITED); /* explicitly inherit */
nvlist_t *dummy;
nvpair_t *pair;
zprop_type_t type;
int err;
if (!received) {
/*
* Only check this in the non-received case. We want to allow
* 'inherit -S' to revert non-inheritable properties like quota
* and reservation to the received or default values even though
* they are not considered inheritable.
*/
if (prop != ZPROP_USERPROP && !zfs_prop_inheritable(prop))
return (SET_ERROR(EINVAL));
}
if (prop == ZPROP_USERPROP) {
if (!zfs_prop_user(propname))
return (SET_ERROR(EINVAL));
type = PROP_TYPE_STRING;
} else if (prop == ZFS_PROP_VOLSIZE || prop == ZFS_PROP_VERSION) {
return (SET_ERROR(EINVAL));
} else {
type = zfs_prop_get_type(prop);
}
/*
* zfs_prop_set_special() expects properties in the form of an
* nvpair with type info.
*/
dummy = fnvlist_alloc();
switch (type) {
case PROP_TYPE_STRING:
VERIFY(0 == nvlist_add_string(dummy, propname, ""));
break;
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
VERIFY(0 == nvlist_add_uint64(dummy, propname, 0));
break;
default:
err = SET_ERROR(EINVAL);
goto errout;
}
pair = nvlist_next_nvpair(dummy, NULL);
if (pair == NULL) {
err = SET_ERROR(EINVAL);
} else {
err = zfs_prop_set_special(zc->zc_name, source, pair);
if (err == -1) /* property is not "special", needs handling */
err = dsl_prop_inherit(zc->zc_name, zc->zc_value,
source);
}
errout:
nvlist_free(dummy);
return (err);
}
static int
zfs_ioc_pool_set_props(zfs_cmd_t *zc)
{
nvlist_t *props;
spa_t *spa;
int error;
nvpair_t *pair;
if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props)))
return (error);
/*
* If the only property is the configfile, then just do a spa_lookup()
* to handle the faulted case.
*/
pair = nvlist_next_nvpair(props, NULL);
if (pair != NULL && strcmp(nvpair_name(pair),
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE)) == 0 &&
nvlist_next_nvpair(props, pair) == NULL) {
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(zc->zc_name)) != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
mutex_exit(&spa_namespace_lock);
if (spa != NULL) {
nvlist_free(props);
return (0);
}
}
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) {
nvlist_free(props);
return (error);
}
error = spa_prop_set(spa, props);
nvlist_free(props);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_pool_get_props(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
nvlist_t *nvp = NULL;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) {
/*
* If the pool is faulted, there may be properties we can still
* get (such as altroot and cachefile), so attempt to get them
* anyway.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(zc->zc_name)) != NULL)
error = spa_prop_get(spa, &nvp);
mutex_exit(&spa_namespace_lock);
} else {
error = spa_prop_get(spa, &nvp);
spa_close(spa, FTAG);
}
if (error == 0 && zc->zc_nvlist_dst != 0)
error = put_nvlist(zc, nvp);
else
error = SET_ERROR(EFAULT);
nvlist_free(nvp);
return (error);
}
/*
* innvl: {
* "vdevprops_set_vdev" -> guid
* "vdevprops_set_props" -> { prop -> value }
* }
*
* outnvl: propname -> error code (int32)
*/
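/*
* Example innvl (illustrative; the guid and the property name/value are
* made up):
*
*	nvlist_t *props = fnvlist_alloc();
*	fnvlist_add_string(props, "comment", "left mirror leg");
*	nvlist_t *innvl = fnvlist_alloc();
*	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
*	fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
*
* On return, outnvl maps each property name to an int32 error code.
*/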
static const zfs_ioc_key_t zfs_keys_vdev_set_props[] = {
{ZPOOL_VDEV_PROPS_SET_VDEV, DATA_TYPE_UINT64, 0},
{ZPOOL_VDEV_PROPS_SET_PROPS, DATA_TYPE_NVLIST, 0}
};
static int
zfs_ioc_vdev_set_props(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa;
int error;
vdev_t *vd;
uint64_t vdev_guid;
/* Early validation */
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (outnvl == NULL)
return (SET_ERROR(EINVAL));
if ((error = spa_open(poolname, &spa, FTAG)) != 0)
return (error);
ASSERT(spa_writeable(spa));
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOENT));
}
error = vdev_prop_set(vd, innvl, outnvl);
spa_close(spa, FTAG);
return (error);
}
/*
* innvl: {
* "vdevprops_get_vdev" -> guid
* (optional) "vdevprops_get_props" -> { propname -> propid }
* }
*
* outnvl: propname -> value
*/
static const zfs_ioc_key_t zfs_keys_vdev_get_props[] = {
{ZPOOL_VDEV_PROPS_GET_VDEV, DATA_TYPE_UINT64, 0},
{ZPOOL_VDEV_PROPS_GET_PROPS, DATA_TYPE_NVLIST, ZK_OPTIONAL}
};
static int
zfs_ioc_vdev_get_props(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa;
int error;
vdev_t *vd;
uint64_t vdev_guid;
/* Early validation */
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (outnvl == NULL)
return (SET_ERROR(EINVAL));
if ((error = spa_open(poolname, &spa, FTAG)) != 0)
return (error);
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOENT));
}
error = vdev_prop_get(vd, innvl, outnvl);
spa_close(spa, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_src{_size} nvlist of delegated permissions
* zc_perm_action allow/unallow flag
*
* outputs: none
*/
static int
zfs_ioc_set_fsacl(zfs_cmd_t *zc)
{
int error;
nvlist_t *fsaclnv = NULL;
if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &fsaclnv)) != 0)
return (error);
/*
* Verify nvlist is constructed correctly
*/
if ((error = zfs_deleg_verify_nvlist(fsaclnv)) != 0) {
nvlist_free(fsaclnv);
return (SET_ERROR(EINVAL));
}
/*
* If we don't have PRIV_SYS_MOUNT, then validate
* that the user is allowed to hand out each permission in
* the nvlist(s)
*/
error = secpolicy_zfs(CRED());
if (error != 0) {
if (zc->zc_perm_action == B_FALSE) {
error = dsl_deleg_can_allow(zc->zc_name,
fsaclnv, CRED());
} else {
error = dsl_deleg_can_unallow(zc->zc_name,
fsaclnv, CRED());
}
}
if (error == 0)
error = dsl_deleg_set(zc->zc_name, fsaclnv, zc->zc_perm_action);
nvlist_free(fsaclnv);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* zc_nvlist_src{_size} nvlist of delegated permissions
*/
static int
zfs_ioc_get_fsacl(zfs_cmd_t *zc)
{
nvlist_t *nvp;
int error;
if ((error = dsl_deleg_get(zc->zc_name, &nvp)) == 0) {
error = put_nvlist(zc, nvp);
nvlist_free(nvp);
}
return (error);
}
static void
zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
zfs_creat_t *zct = arg;
zfs_create_fs(os, cr, zct->zct_zplprops, tx);
}
#define ZFS_PROP_UNDEFINED ((uint64_t)-1)
/*
* inputs:
* os parent objset pointer (NULL if root fs)
* fuids_ok fuids allowed in this version of the spa?
* sa_ok SAs allowed in this version of the spa?
* createprops list of properties requested by creator
*
* outputs:
* zplprops values for the zplprops we attach to the master node object
* is_ci true if requested file system will be purely case-insensitive
*
* Determine the settings for utf8only, normalization and
* casesensitivity. Specific values may have been requested by the
* creator and/or we can inherit values from the parent dataset. If
* the file system is of too early a vintage, a creator can not
* request settings for these properties, even if the requested
* setting is the default value. We don't actually want to create dsl
* properties for these, so remove them from the source nvlist after
* processing.
*/
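/*
* Worked example (hypothetical values): if the creator requests
* normalization=formD but neither utf8only nor casesensitivity, then norm
* comes from createprops, u8 is forced to 1 below (normalized names must be
* valid UTF-8), and sense is inherited from the parent objset via
* zfs_get_zplprop(); all three end up in zplprops for the create callback.
*/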
static int
zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
boolean_t fuids_ok, boolean_t sa_ok, nvlist_t *createprops,
nvlist_t *zplprops, boolean_t *is_ci)
{
uint64_t sense = ZFS_PROP_UNDEFINED;
uint64_t norm = ZFS_PROP_UNDEFINED;
uint64_t u8 = ZFS_PROP_UNDEFINED;
int error;
ASSERT(zplprops != NULL);
/* parent dataset must be a filesystem */
if (os != NULL && os->os_phys->os_type != DMU_OST_ZFS)
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
/*
* Pull out creator prop choices, if any.
*/
if (createprops) {
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_VERSION), &zplver);
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_NORMALIZE), &norm);
(void) nvlist_remove_all(createprops,
zfs_prop_to_name(ZFS_PROP_NORMALIZE));
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), &u8);
(void) nvlist_remove_all(createprops,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_CASE), &sense);
(void) nvlist_remove_all(createprops,
zfs_prop_to_name(ZFS_PROP_CASE));
}
/*
* If the zpl version requested is invalid, or the file system
* or pool version is too "young" to support normalization
* and the creator tried to set a value for one of the props,
* error out.
*/
if ((zplver < ZPL_VERSION_INITIAL || zplver > ZPL_VERSION) ||
(zplver >= ZPL_VERSION_FUID && !fuids_ok) ||
(zplver >= ZPL_VERSION_SA && !sa_ok) ||
(zplver < ZPL_VERSION_NORMALIZATION &&
(norm != ZFS_PROP_UNDEFINED || u8 != ZFS_PROP_UNDEFINED ||
sense != ZFS_PROP_UNDEFINED)))
return (SET_ERROR(ENOTSUP));
/*
* Put the version in the zplprops
*/
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0);
if (norm == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0)
return (error);
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0);
/*
* If we're normalizing, names must always be valid UTF-8 strings.
*/
if (norm)
u8 = 1;
if (u8 == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0)
return (error);
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0);
if (sense == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0)
return (error);
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0);
if (is_ci)
*is_ci = (sense == ZFS_CASE_INSENSITIVE);
return (0);
}
static int
zfs_fill_zplprops(const char *dataset, nvlist_t *createprops,
nvlist_t *zplprops, boolean_t *is_ci)
{
boolean_t fuids_ok, sa_ok;
uint64_t zplver = ZPL_VERSION;
objset_t *os = NULL;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
spa_t *spa;
uint64_t spa_vers;
int error;
zfs_get_parent(dataset, parentname, sizeof (parentname));
if ((error = spa_open(dataset, &spa, FTAG)) != 0)
return (error);
spa_vers = spa_version(spa);
spa_close(spa, FTAG);
zplver = zfs_zpl_version_map(spa_vers);
fuids_ok = (zplver >= ZPL_VERSION_FUID);
sa_ok = (zplver >= ZPL_VERSION_SA);
/*
* Open parent object set so we can inherit zplprop values.
*/
if ((error = dmu_objset_hold(parentname, FTAG, &os)) != 0)
return (error);
error = zfs_fill_zplprops_impl(os, zplver, fuids_ok, sa_ok, createprops,
zplprops, is_ci);
dmu_objset_rele(os, FTAG);
return (error);
}
static int
zfs_fill_zplprops_root(uint64_t spa_vers, nvlist_t *createprops,
nvlist_t *zplprops, boolean_t *is_ci)
{
boolean_t fuids_ok;
boolean_t sa_ok;
uint64_t zplver = ZPL_VERSION;
int error;
zplver = zfs_zpl_version_map(spa_vers);
fuids_ok = (zplver >= ZPL_VERSION_FUID);
sa_ok = (zplver >= ZPL_VERSION_SA);
error = zfs_fill_zplprops_impl(NULL, zplver, fuids_ok, sa_ok,
createprops, zplprops, is_ci);
return (error);
}
/*
* innvl: {
* "type" -> dmu_objset_type_t (int32)
* (optional) "props" -> { prop -> value }
* (optional) "hidden_args" -> { "wkeydata" -> value }
* raw uint8_t array of encryption wrapping key data (32 bytes)
* }
*
* outnvl: propname -> error code (int32)
*/
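/*
* Example innvl for creating a 1 GiB volume (values are illustrative):
*
*	nvlist_t *props = fnvlist_alloc();
*	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE), 1 << 30);
*	nvlist_t *innvl = fnvlist_alloc();
*	fnvlist_add_int32(innvl, "type", DMU_OST_ZVOL);
*	fnvlist_add_nvlist(innvl, "props", props);
*
* volsize is mandatory for DMU_OST_ZVOL (see zfs_ioc_create() below), while
* volblocksize falls back to its default when omitted.
*/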
static const zfs_ioc_key_t zfs_keys_create[] = {
{"type", DATA_TYPE_INT32, 0},
{"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
{"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
};
static int
zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int error = 0;
zfs_creat_t zct = { 0 };
nvlist_t *nvprops = NULL;
nvlist_t *hidden_args = NULL;
void (*cbfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
dmu_objset_type_t type;
boolean_t is_insensitive = B_FALSE;
dsl_crypto_params_t *dcp = NULL;
type = (dmu_objset_type_t)fnvlist_lookup_int32(innvl, "type");
(void) nvlist_lookup_nvlist(innvl, "props", &nvprops);
(void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
switch (type) {
case DMU_OST_ZFS:
cbfunc = zfs_create_cb;
break;
case DMU_OST_ZVOL:
cbfunc = zvol_create_cb;
break;
default:
cbfunc = NULL;
break;
}
if (strchr(fsname, '@') ||
strchr(fsname, '%'))
return (SET_ERROR(EINVAL));
zct.zct_props = nvprops;
if (cbfunc == NULL)
return (SET_ERROR(EINVAL));
if (type == DMU_OST_ZVOL) {
uint64_t volsize, volblocksize;
if (nvprops == NULL)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) != 0)
return (SET_ERROR(EINVAL));
if ((error = nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize)) != 0 && error != ENOENT)
return (SET_ERROR(EINVAL));
if (error != 0)
volblocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
if ((error = zvol_check_volblocksize(fsname,
volblocksize)) != 0 ||
(error = zvol_check_volsize(volsize,
volblocksize)) != 0)
return (error);
} else if (type == DMU_OST_ZFS) {
int error;
/*
* We have to have normalization and
* case-folding flags correct when we do the
* file system creation, so go figure them out
* now.
*/
VERIFY(nvlist_alloc(&zct.zct_zplprops,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
error = zfs_fill_zplprops(fsname, nvprops,
zct.zct_zplprops, &is_insensitive);
if (error != 0) {
nvlist_free(zct.zct_zplprops);
return (error);
}
}
error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, nvprops,
hidden_args, &dcp);
if (error != 0) {
nvlist_free(zct.zct_zplprops);
return (error);
}
error = dmu_objset_create(fsname, type,
is_insensitive ? DS_FLAG_CI_DATASET : 0, dcp, cbfunc, &zct);
nvlist_free(zct.zct_zplprops);
dsl_crypto_params_free(dcp, !!error);
/*
* It would be nice to do this atomically.
*/
if (error == 0) {
error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL,
nvprops, outnvl);
if (error != 0) {
spa_t *spa;
int error2;
/*
* Volumes will return EBUSY and cannot be destroyed
* until all asynchronous minor handling (e.g. from
* setting the volmode property) has completed. Wait for
* the spa_zvol_taskq to drain then retry.
*/
error2 = dsl_destroy_head(fsname);
while ((error2 == EBUSY) && (type == DMU_OST_ZVOL)) {
error2 = spa_open(fsname, &spa, FTAG);
if (error2 == 0) {
taskq_wait(spa->spa_zvol_taskq);
spa_close(spa, FTAG);
}
error2 = dsl_destroy_head(fsname);
}
}
}
return (error);
}
/*
* innvl: {
* "origin" -> name of origin snapshot
* (optional) "props" -> { prop -> value }
* (optional) "hidden_args" -> { "wkeydata" -> value }
* raw uint8_t array of encryption wrapping key data (32 bytes)
* }
*
* outputs:
* outnvl: propname -> error code (int32)
*/
static const zfs_ioc_key_t zfs_keys_clone[] = {
{"origin", DATA_TYPE_STRING, 0},
{"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
{"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
};
static int
zfs_ioc_clone(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int error = 0;
nvlist_t *nvprops = NULL;
const char *origin_name;
origin_name = fnvlist_lookup_string(innvl, "origin");
(void) nvlist_lookup_nvlist(innvl, "props", &nvprops);
if (strchr(fsname, '@') ||
strchr(fsname, '%'))
return (SET_ERROR(EINVAL));
if (dataset_namecheck(origin_name, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
error = dmu_objset_clone(fsname, origin_name);
/*
* It would be nice to do this atomically.
*/
if (error == 0) {
error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL,
nvprops, outnvl);
if (error != 0)
(void) dsl_destroy_head(fsname);
}
return (error);
}
static const zfs_ioc_key_t zfs_keys_remap[] = {
/* no nvl keys */
};
static int
zfs_ioc_remap(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
/* This IOCTL is no longer supported. */
(void) fsname, (void) innvl, (void) outnvl;
return (0);
}
/*
* innvl: {
* "snaps" -> { snapshot1, snapshot2 }
* (optional) "props" -> { prop -> value (string) }
* }
*
* outnvl: snapshot -> error code (int32)
*/
static const zfs_ioc_key_t zfs_keys_snapshot[] = {
{"snaps", DATA_TYPE_NVLIST, 0},
{"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
};
static int
zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
nvlist_t *snaps;
nvlist_t *props = NULL;
int error, poollen;
nvpair_t *pair;
(void) nvlist_lookup_nvlist(innvl, "props", &props);
if (!nvlist_empty(props) &&
zfs_earlier_version(poolname, SPA_VERSION_SNAP_PROPS))
return (SET_ERROR(ENOTSUP));
if ((error = zfs_check_userprops(props)) != 0)
return (error);
snaps = fnvlist_lookup_nvlist(innvl, "snaps");
poollen = strlen(poolname);
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
const char *name = nvpair_name(pair);
char *cp = strchr(name, '@');
/*
* The snap name must contain an @, and the part after it must
* contain only valid characters.
*/
if (cp == NULL ||
zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
/*
* The snap must be in the specified pool.
*/
if (strncmp(name, poolname, poollen) != 0 ||
(name[poollen] != '/' && name[poollen] != '@'))
return (SET_ERROR(EXDEV));
/*
* Check for permission to set the properties on the fs.
*/
if (!nvlist_empty(props)) {
*cp = '\0';
error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_USERPROP, CRED());
*cp = '@';
if (error != 0)
return (error);
}
/* This must be the only snap of this fs. */
for (nvpair_t *pair2 = nvlist_next_nvpair(snaps, pair);
pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) {
if (strncmp(name, nvpair_name(pair2), cp - name + 1)
== 0) {
return (SET_ERROR(EXDEV));
}
}
}
error = dsl_dataset_snapshot(snaps, props, outnvl);
return (error);
}
/*
* innvl: "message" -> string
*/
static const zfs_ioc_key_t zfs_keys_log_history[] = {
{"message", DATA_TYPE_STRING, 0},
};
static int
zfs_ioc_log_history(const char *unused, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) unused, (void) outnvl;
const char *message;
char *poolname;
spa_t *spa;
int error;
/*
* The poolname in the ioctl is not set, we get it from the TSD,
* which was set at the end of the last successful ioctl that allows
* logging. The secpolicy func already checked that it is set.
* Only one log ioctl is allowed after each successful ioctl, so
* we clear the TSD here.
*/
poolname = tsd_get(zfs_allow_log_key);
if (poolname == NULL)
return (SET_ERROR(EINVAL));
(void) tsd_set(zfs_allow_log_key, NULL);
error = spa_open(poolname, &spa, FTAG);
kmem_strfree(poolname);
if (error != 0)
return (error);
message = fnvlist_lookup_string(innvl, "message");
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
error = spa_history_log(spa, message);
spa_close(spa, FTAG);
return (error);
}
/*
* This ioctl is used to set the bootenv configuration on the current
* pool. This configuration is stored in the second padding area of the label,
* and it is used by the bootloader(s) to store bootloader- and/or
* system-specific data.
* The data is stored as an nvlist data stream and is protected by
* an embedded checksum.
* The version can have two possible values:
* VB_RAW: nvlist should have key GRUB_ENVMAP, value DATA_TYPE_STRING.
* VB_NVLIST: nvlist with arbitrary <key, value> pairs.
*/
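/*
* Example innvl (illustrative, VB_NVLIST flavor; the "rootfs" key and value
* are hypothetical bootloader data, only "version" is interpreted here):
*
*	nvlist_t *innvl = fnvlist_alloc();
*	fnvlist_add_uint64(innvl, "version", VB_NVLIST);
*	fnvlist_add_string(innvl, "rootfs", "zfs:pool/ROOT/default:");
*/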
static const zfs_ioc_key_t zfs_keys_set_bootenv[] = {
{"version", DATA_TYPE_UINT64, 0},
{"<keys>", DATA_TYPE_ANY, ZK_OPTIONAL | ZK_WILDCARDLIST},
};
static int
zfs_ioc_set_bootenv(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
{
int error;
spa_t *spa;
if ((error = spa_open(name, &spa, FTAG)) != 0)
return (error);
spa_vdev_state_enter(spa, SCL_ALL);
error = vdev_label_write_bootenv(spa->spa_root_vdev, innvl);
(void) spa_vdev_state_exit(spa, NULL, 0);
spa_close(spa, FTAG);
return (error);
}
static const zfs_ioc_key_t zfs_keys_get_bootenv[] = {
/* no nvl keys */
};
static int
zfs_ioc_get_bootenv(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa;
int error;
if ((error = spa_open(name, &spa, FTAG)) != 0)
return (error);
spa_vdev_state_enter(spa, SCL_ALL);
error = vdev_label_read_bootenv(spa->spa_root_vdev, outnvl);
(void) spa_vdev_state_exit(spa, NULL, 0);
spa_close(spa, FTAG);
return (error);
}
/*
* The dp_config_rwlock must not be held when calling this, because the
* unmount may need to write out data.
*
* This function is best-effort. Callers must deal gracefully with the case
* where the snapshot remains mounted (or is remounted after this call).
*
* It returns nothing: if the argument is not a snapshot name it is a no-op;
* otherwise a forced unmount is attempted and any failure is ignored.
*/
void
zfs_unmount_snap(const char *snapname)
{
if (strchr(snapname, '@') == NULL)
return;
(void) zfsctl_snapshot_unmount(snapname, MNT_FORCE);
}
static int
zfs_unmount_snap_cb(const char *snapname, void *arg)
{
(void) arg;
zfs_unmount_snap(snapname);
return (0);
}
/*
* When a clone is destroyed, its origin may also need to be destroyed,
* in which case it must be unmounted. This routine will do that unmount
* if necessary.
*/
void
zfs_destroy_unmount_origin(const char *fsname)
{
int error;
objset_t *os;
dsl_dataset_t *ds;
error = dmu_objset_hold(fsname, FTAG, &os);
if (error != 0)
return;
ds = dmu_objset_ds(os);
if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev)) {
char originname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds->ds_prev, originname);
dmu_objset_rele(os, FTAG);
zfs_unmount_snap(originname);
} else {
dmu_objset_rele(os, FTAG);
}
}
/*
* innvl: {
* "snaps" -> { snapshot1, snapshot2 }
* (optional boolean) "defer"
* }
*
* outnvl: snapshot -> error code (int32)
*/
static const zfs_ioc_key_t zfs_keys_destroy_snaps[] = {
{"snaps", DATA_TYPE_NVLIST, 0},
{"defer", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
};
static int
zfs_ioc_destroy_snaps(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
int poollen;
nvlist_t *snaps;
nvpair_t *pair;
boolean_t defer;
spa_t *spa;
snaps = fnvlist_lookup_nvlist(innvl, "snaps");
defer = nvlist_exists(innvl, "defer");
poollen = strlen(poolname);
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
const char *name = nvpair_name(pair);
/*
* The snap must be in the specified pool to prevent the
* invalid removal of zvol minors below.
*/
if (strncmp(name, poolname, poollen) != 0 ||
(name[poollen] != '/' && name[poollen] != '@'))
return (SET_ERROR(EXDEV));
zfs_unmount_snap(nvpair_name(pair));
if (spa_open(name, &spa, FTAG) == 0) {
zvol_remove_minors(spa, name, B_TRUE);
spa_close(spa, FTAG);
}
}
return (dsl_destroy_snapshots_nvl(snaps, defer, outnvl));
}
/*
* Create bookmarks. The bookmark names are of the form <fs>#<bmark>.
* All bookmarks and snapshots must be in the same pool.
* dsl_bookmark_create_nvl_validate describes the nvlist schema in more detail.
*
* innvl: {
* new_bookmark1 -> existing_snapshot,
* new_bookmark2 -> existing_bookmark,
* }
*
* outnvl: bookmark -> error code (int32)
*
*/
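/*
* Example innvl (names are illustrative):
*
*	nvlist_t *innvl = fnvlist_alloc();
*	fnvlist_add_string(innvl, "pool/fs#monday", "pool/fs@monday");
*	fnvlist_add_string(innvl, "pool/fs#monday_copy", "pool/fs#monday");
*
* The first entry bookmarks a snapshot, the second copies an existing
* bookmark, matching the two forms shown above.
*/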
static const zfs_ioc_key_t zfs_keys_bookmark[] = {
{"<bookmark>...", DATA_TYPE_STRING, ZK_WILDCARDLIST},
};
static int
zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) poolname;
return (dsl_bookmark_create(innvl, outnvl));
}
/*
* innvl: {
* property 1, property 2, ...
* }
*
* outnvl: {
* bookmark name 1 -> { property 1, property 2, ... },
* bookmark name 2 -> { property 1, property 2, ... }
* }
*
*/
static const zfs_ioc_key_t zfs_keys_get_bookmarks[] = {
{"<property>...", DATA_TYPE_BOOLEAN, ZK_WILDCARDLIST | ZK_OPTIONAL},
};
static int
zfs_ioc_get_bookmarks(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
return (dsl_get_bookmarks(fsname, innvl, outnvl));
}
/*
* innvl is not used.
*
* outnvl: {
* property 1, property 2, ...
* }
*
*/
static const zfs_ioc_key_t zfs_keys_get_bookmark_props[] = {
/* no nvl keys */
};
static int
zfs_ioc_get_bookmark_props(const char *bookmark, nvlist_t *innvl,
nvlist_t *outnvl)
{
(void) innvl;
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *bmname;
bmname = strchr(bookmark, '#');
if (bmname == NULL)
return (SET_ERROR(EINVAL));
bmname++;
(void) strlcpy(fsname, bookmark, sizeof (fsname));
*(strchr(fsname, '#')) = '\0';
return (dsl_get_bookmark_props(fsname, bmname, outnvl));
}
/*
* innvl: {
* bookmark name 1, bookmark name 2
* }
*
* outnvl: bookmark -> error code (int32)
*
*/
static const zfs_ioc_key_t zfs_keys_destroy_bookmarks[] = {
{"<bookmark>...", DATA_TYPE_BOOLEAN, ZK_WILDCARDLIST},
};
static int
zfs_ioc_destroy_bookmarks(const char *poolname, nvlist_t *innvl,
nvlist_t *outnvl)
{
int error, poollen;
poollen = strlen(poolname);
for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
const char *name = nvpair_name(pair);
const char *cp = strchr(name, '#');
/*
* The bookmark name must contain a '#', and the part after it
* must contain only valid characters.
*/
if (cp == NULL ||
zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
/*
* The bookmark must be in the specified pool.
*/
if (strncmp(name, poolname, poollen) != 0 ||
(name[poollen] != '/' && name[poollen] != '#'))
return (SET_ERROR(EXDEV));
}
error = dsl_bookmark_destroy(innvl, outnvl);
return (error);
}
static const zfs_ioc_key_t zfs_keys_channel_program[] = {
{"program", DATA_TYPE_STRING, 0},
{"arg", DATA_TYPE_ANY, 0},
{"sync", DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
{"instrlimit", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"memlimit", DATA_TYPE_UINT64, ZK_OPTIONAL},
};
static int
zfs_ioc_channel_program(const char *poolname, nvlist_t *innvl,
nvlist_t *outnvl)
{
char *program;
uint64_t instrlimit, memlimit;
boolean_t sync_flag;
nvpair_t *nvarg = NULL;
program = fnvlist_lookup_string(innvl, ZCP_ARG_PROGRAM);
if (0 != nvlist_lookup_boolean_value(innvl, ZCP_ARG_SYNC, &sync_flag)) {
sync_flag = B_TRUE;
}
if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_INSTRLIMIT, &instrlimit)) {
instrlimit = ZCP_DEFAULT_INSTRLIMIT;
}
if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_MEMLIMIT, &memlimit)) {
memlimit = ZCP_DEFAULT_MEMLIMIT;
}
nvarg = fnvlist_lookup_nvpair(innvl, ZCP_ARG_ARGLIST);
if (instrlimit == 0 || instrlimit > zfs_lua_max_instrlimit)
return (SET_ERROR(EINVAL));
if (memlimit == 0 || memlimit > zfs_lua_max_memlimit)
return (SET_ERROR(EINVAL));
return (zcp_eval(poolname, program, sync_flag, instrlimit, memlimit,
nvarg, outnvl));
}
/*
* innvl: unused
* outnvl: empty
*/
static const zfs_ioc_key_t zfs_keys_pool_checkpoint[] = {
/* no nvl keys */
};
static int
zfs_ioc_pool_checkpoint(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) innvl, (void) outnvl;
return (spa_checkpoint(poolname));
}
/*
* innvl: unused
* outnvl: empty
*/
static const zfs_ioc_key_t zfs_keys_pool_discard_checkpoint[] = {
/* no nvl keys */
};
static int
zfs_ioc_pool_discard_checkpoint(const char *poolname, nvlist_t *innvl,
nvlist_t *outnvl)
{
(void) innvl, (void) outnvl;
return (spa_checkpoint_discard(poolname));
}
/*
* inputs:
* zc_name name of dataset to destroy
* zc_defer_destroy mark for deferred destroy
*
* outputs: none
*/
static int
zfs_ioc_destroy(zfs_cmd_t *zc)
{
objset_t *os;
dmu_objset_type_t ost;
int err;
err = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (err != 0)
return (err);
ost = dmu_objset_type(os);
dmu_objset_rele(os, FTAG);
if (ost == DMU_OST_ZFS)
zfs_unmount_snap(zc->zc_name);
if (strchr(zc->zc_name, '@')) {
err = dsl_destroy_snapshot(zc->zc_name, zc->zc_defer_destroy);
} else {
err = dsl_destroy_head(zc->zc_name);
if (err == EEXIST) {
/*
* It is possible that the given DS may have
* hidden child (%recv) datasets - "leftovers"
* resulting from the previously interrupted
* 'zfs receive'.
*
* 6 extra bytes for /%recv
*/
char namebuf[ZFS_MAX_DATASET_NAME_LEN + 6];
if (snprintf(namebuf, sizeof (namebuf), "%s/%s",
zc->zc_name, recv_clone_name) >=
sizeof (namebuf))
return (SET_ERROR(EINVAL));
/*
* Try to remove the hidden child (%recv) and after
* that try to remove the target dataset.
* If the hidden child (%recv) does not exist,
* the original error (EEXIST) will be returned.
*/
err = dsl_destroy_head(namebuf);
if (err == 0)
err = dsl_destroy_head(zc->zc_name);
else if (err == ENOENT)
err = SET_ERROR(EEXIST);
}
}
return (err);
}
/*
* innvl: {
* "initialize_command" -> POOL_INITIALIZE_{CANCEL|START|SUSPEND} (uint64)
* "initialize_vdevs": { -> guids to initialize (nvlist)
* "vdev_path_1": vdev_guid_1, (uint64),
* "vdev_path_2": vdev_guid_2, (uint64),
* ...
* },
* }
*
* outnvl: {
* "initialize_vdevs": { -> initialization errors (nvlist)
* "vdev_path_1": errno, see function body for possible errnos (uint64)
* "vdev_path_2": errno, ... (uint64)
* ...
* }
* }
*
* EINVAL is returned for an unknown command or if any of the provided vdev
* guids have been specified with a type other than uint64.
*/
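/*
* Example innvl starting initialization on two vdevs (guids and paths are
* illustrative):
*
*	nvlist_t *vdevs = fnvlist_alloc();
*	fnvlist_add_uint64(vdevs, "/dev/da1", guid1);
*	fnvlist_add_uint64(vdevs, "/dev/da2", guid2);
*	nvlist_t *innvl = fnvlist_alloc();
*	fnvlist_add_uint64(innvl, ZPOOL_INITIALIZE_COMMAND,
*	    POOL_INITIALIZE_START);
*	fnvlist_add_nvlist(innvl, ZPOOL_INITIALIZE_VDEVS, vdevs);
*/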
static const zfs_ioc_key_t zfs_keys_pool_initialize[] = {
{ZPOOL_INITIALIZE_COMMAND, DATA_TYPE_UINT64, 0},
{ZPOOL_INITIALIZE_VDEVS, DATA_TYPE_NVLIST, 0}
};
static int
zfs_ioc_pool_initialize(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
uint64_t cmd_type;
if (nvlist_lookup_uint64(innvl, ZPOOL_INITIALIZE_COMMAND,
&cmd_type) != 0) {
return (SET_ERROR(EINVAL));
}
if (!(cmd_type == POOL_INITIALIZE_CANCEL ||
cmd_type == POOL_INITIALIZE_START ||
cmd_type == POOL_INITIALIZE_SUSPEND)) {
return (SET_ERROR(EINVAL));
}
nvlist_t *vdev_guids;
if (nvlist_lookup_nvlist(innvl, ZPOOL_INITIALIZE_VDEVS,
&vdev_guids) != 0) {
return (SET_ERROR(EINVAL));
}
for (nvpair_t *pair = nvlist_next_nvpair(vdev_guids, NULL);
pair != NULL; pair = nvlist_next_nvpair(vdev_guids, pair)) {
uint64_t vdev_guid;
if (nvpair_value_uint64(pair, &vdev_guid) != 0) {
return (SET_ERROR(EINVAL));
}
}
spa_t *spa;
int error = spa_open(poolname, &spa, FTAG);
if (error != 0)
return (error);
nvlist_t *vdev_errlist = fnvlist_alloc();
int total_errors = spa_vdev_initialize(spa, vdev_guids, cmd_type,
vdev_errlist);
if (fnvlist_size(vdev_errlist) > 0) {
fnvlist_add_nvlist(outnvl, ZPOOL_INITIALIZE_VDEVS,
vdev_errlist);
}
fnvlist_free(vdev_errlist);
spa_close(spa, FTAG);
return (total_errors > 0 ? SET_ERROR(EINVAL) : 0);
}
/*
* innvl: {
* "trim_command" -> POOL_TRIM_{CANCEL|START|SUSPEND} (uint64)
* "trim_vdevs": { -> guids to TRIM (nvlist)
* "vdev_path_1": vdev_guid_1, (uint64),
* "vdev_path_2": vdev_guid_2, (uint64),
* ...
* },
* "trim_rate" -> Target TRIM rate in bytes/sec.
* "trim_secure" -> Set to request a secure TRIM.
* }
*
* outnvl: {
* "trim_vdevs": { -> TRIM errors (nvlist)
* "vdev_path_1": errno, see function body for possible errnos (uint64)
* "vdev_path_2": errno, ... (uint64)
* ...
* }
* }
*
* EINVAL is returned for an unknown command or if any of the provided vdev
* guids have been specified with a type other than uint64.
*/
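/*
* Example innvl requesting a secure TRIM of one vdev (values are
* illustrative):
*
*	nvlist_t *vdevs = fnvlist_alloc();
*	fnvlist_add_uint64(vdevs, "/dev/da1", guid1);
*	nvlist_t *innvl = fnvlist_alloc();
*	fnvlist_add_uint64(innvl, ZPOOL_TRIM_COMMAND, POOL_TRIM_START);
*	fnvlist_add_nvlist(innvl, ZPOOL_TRIM_VDEVS, vdevs);
*	fnvlist_add_boolean_value(innvl, ZPOOL_TRIM_SECURE, B_TRUE);
*/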
static const zfs_ioc_key_t zfs_keys_pool_trim[] = {
{ZPOOL_TRIM_COMMAND, DATA_TYPE_UINT64, 0},
{ZPOOL_TRIM_VDEVS, DATA_TYPE_NVLIST, 0},
{ZPOOL_TRIM_RATE, DATA_TYPE_UINT64, ZK_OPTIONAL},
{ZPOOL_TRIM_SECURE, DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
};
static int
zfs_ioc_pool_trim(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
uint64_t cmd_type;
if (nvlist_lookup_uint64(innvl, ZPOOL_TRIM_COMMAND, &cmd_type) != 0)
return (SET_ERROR(EINVAL));
if (!(cmd_type == POOL_TRIM_CANCEL ||
cmd_type == POOL_TRIM_START ||
cmd_type == POOL_TRIM_SUSPEND)) {
return (SET_ERROR(EINVAL));
}
nvlist_t *vdev_guids;
if (nvlist_lookup_nvlist(innvl, ZPOOL_TRIM_VDEVS, &vdev_guids) != 0)
return (SET_ERROR(EINVAL));
for (nvpair_t *pair = nvlist_next_nvpair(vdev_guids, NULL);
pair != NULL; pair = nvlist_next_nvpair(vdev_guids, pair)) {
uint64_t vdev_guid;
if (nvpair_value_uint64(pair, &vdev_guid) != 0) {
return (SET_ERROR(EINVAL));
}
}
/* Optional, defaults to maximum rate when not provided */
uint64_t rate;
if (nvlist_lookup_uint64(innvl, ZPOOL_TRIM_RATE, &rate) != 0)
rate = 0;
/* Optional, defaults to standard TRIM when not provided */
boolean_t secure;
if (nvlist_lookup_boolean_value(innvl, ZPOOL_TRIM_SECURE,
&secure) != 0) {
secure = B_FALSE;
}
spa_t *spa;
int error = spa_open(poolname, &spa, FTAG);
if (error != 0)
return (error);
nvlist_t *vdev_errlist = fnvlist_alloc();
int total_errors = spa_vdev_trim(spa, vdev_guids, cmd_type,
rate, !!zfs_trim_metaslab_skip, secure, vdev_errlist);
if (fnvlist_size(vdev_errlist) > 0)
fnvlist_add_nvlist(outnvl, ZPOOL_TRIM_VDEVS, vdev_errlist);
fnvlist_free(vdev_errlist);
spa_close(spa, FTAG);
return (total_errors > 0 ? SET_ERROR(EINVAL) : 0);
}
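/*
* Illustrative sketch, not part of the surrounding code: the TRIM innvl has
* the same shape as the initialize innvl above, plus two optional keys. The
* rate value and the vdevs nvlist here are placeholders.
*
*     fnvlist_add_uint64(innvl, ZPOOL_TRIM_COMMAND, POOL_TRIM_START);
*     fnvlist_add_nvlist(innvl, ZPOOL_TRIM_VDEVS, vdevs);
*     fnvlist_add_uint64(innvl, ZPOOL_TRIM_RATE, 64 << 20);
*     fnvlist_add_boolean_value(innvl, ZPOOL_TRIM_SECURE, B_TRUE);
*
* Omitting ZPOOL_TRIM_RATE requests the maximum rate and omitting
* ZPOOL_TRIM_SECURE requests a standard TRIM, matching the defaults in
* zfs_ioc_pool_trim() above.
*/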
/*
* This ioctl waits for activity of a particular type to complete. If there is
* no activity of that type in progress, it returns immediately, and the
* returned value "waited" is false. If there is activity in progress, and no
* tag is passed in, the ioctl blocks until all activity of that type is
* complete, and then returns with "waited" set to true.
*
* If a tag is provided, it identifies a particular instance of an activity to
* wait for. Currently, this is only valid for use with 'initialize', because
* that is the only activity for which there can be multiple instances running
* concurrently. In the case of 'initialize', the tag corresponds to the guid of
* the vdev on which to wait.
*
* If a thread waiting in the ioctl receives a signal, the call will return
* immediately, and the return value will be EINTR.
*
* innvl: {
* "wait_activity" -> int32_t
* (optional) "wait_tag" -> uint64_t
* }
*
* outnvl: "waited" -> boolean_t
*/
static const zfs_ioc_key_t zfs_keys_pool_wait[] = {
{ZPOOL_WAIT_ACTIVITY, DATA_TYPE_INT32, 0},
{ZPOOL_WAIT_TAG, DATA_TYPE_UINT64, ZK_OPTIONAL},
};
static int
zfs_ioc_wait(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
{
int32_t activity;
uint64_t tag;
boolean_t waited;
int error;
if (nvlist_lookup_int32(innvl, ZPOOL_WAIT_ACTIVITY, &activity) != 0)
return (EINVAL);
if (nvlist_lookup_uint64(innvl, ZPOOL_WAIT_TAG, &tag) == 0)
error = spa_wait_tag(name, activity, tag, &waited);
else
error = spa_wait(name, activity, &waited);
if (error == 0)
fnvlist_add_boolean_value(outnvl, ZPOOL_WAIT_WAITED, waited);
return (error);
}
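/*
* Illustrative sketch, not part of the surrounding code: waiting for all
* initialization on a pool to complete could use an innvl such as
*
*     nvlist_t *innvl = fnvlist_alloc();
*
*     fnvlist_add_int32(innvl, ZPOOL_WAIT_ACTIVITY, ZPOOL_WAIT_INITIALIZE);
*     fnvlist_add_uint64(innvl, ZPOOL_WAIT_TAG, vdev_guid);
*
* where the second call is optional and restricts the wait to the single
* vdev with the given (placeholder) guid. On success the outnvl carries the
* boolean ZPOOL_WAIT_WAITED value described above.
*/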
/*
* This ioctl waits for activity of a particular type to complete. If there is
* no activity of that type in progress, it returns immediately, and the
* returned value "waited" is false. If there is activity in progress, and no
* tag is passed in, the ioctl blocks until all activity of that type is
* complete, and then returns with "waited" set to true.
*
* If a thread waiting in the ioctl receives a signal, the call will return
* immediately, and the return value will be EINTR.
*
* innvl: {
* "wait_activity" -> int32_t
* }
*
* outnvl: "waited" -> boolean_t
*/
static const zfs_ioc_key_t zfs_keys_fs_wait[] = {
{ZFS_WAIT_ACTIVITY, DATA_TYPE_INT32, 0},
};
static int
zfs_ioc_wait_fs(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
{
int32_t activity;
boolean_t waited = B_FALSE;
int error;
dsl_pool_t *dp;
dsl_dir_t *dd;
dsl_dataset_t *ds;
if (nvlist_lookup_int32(innvl, ZFS_WAIT_ACTIVITY, &activity) != 0)
return (SET_ERROR(EINVAL));
if (activity >= ZFS_WAIT_NUM_ACTIVITIES || activity < 0)
return (SET_ERROR(EINVAL));
if ((error = dsl_pool_hold(name, FTAG, &dp)) != 0)
return (error);
if ((error = dsl_dataset_hold(dp, name, FTAG, &ds)) != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
dd = ds->ds_dir;
mutex_enter(&dd->dd_activity_lock);
dd->dd_activity_waiters++;
/*
* We get a long-hold here so that the dsl_dataset_t and dsl_dir_t
* aren't evicted while we're waiting. Normally this is prevented by
* holding the pool, but we can't do that while we're waiting since
* that would prevent TXGs from syncing out. Some of the functionality
* of long-holds (e.g. preventing deletion) is unnecessary for this
* case, since we would cancel the waiters before proceeding with a
* deletion. An alternative mechanism for keeping the dataset around
* could be developed but this is simpler.
*/
dsl_dataset_long_hold(ds, FTAG);
dsl_pool_rele(dp, FTAG);
error = dsl_dir_wait(dd, ds, activity, &waited);
dsl_dataset_long_rele(ds, FTAG);
dd->dd_activity_waiters--;
if (dd->dd_activity_waiters == 0)
cv_signal(&dd->dd_activity_cv);
mutex_exit(&dd->dd_activity_lock);
dsl_dataset_rele(ds, FTAG);
if (error == 0)
fnvlist_add_boolean_value(outnvl, ZFS_WAIT_WAITED, waited);
return (error);
}
/*
* fsname is name of dataset to rollback (to most recent snapshot)
*
* innvl may contain name of expected target snapshot
*
* outnvl: "target" -> name of most recent snapshot
*/
static const zfs_ioc_key_t zfs_keys_rollback[] = {
{"target", DATA_TYPE_STRING, ZK_OPTIONAL},
};
static int
zfs_ioc_rollback(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
zfsvfs_t *zfsvfs;
zvol_state_handle_t *zv;
char *target = NULL;
int error;
(void) nvlist_lookup_string(innvl, "target", &target);
if (target != NULL) {
const char *cp = strchr(target, '@');
/*
* The snap name must contain an @, and the part after it must
* contain only valid characters.
*/
if (cp == NULL ||
zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
}
if (getzfsvfs(fsname, &zfsvfs) == 0) {
dsl_dataset_t *ds;
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
if (error == 0) {
int resume_err;
error = dsl_dataset_rollback(fsname, target, zfsvfs,
outnvl);
resume_err = zfs_resume_fs(zfsvfs, ds);
error = error ? error : resume_err;
}
zfs_vfs_rele(zfsvfs);
} else if ((zv = zvol_suspend(fsname)) != NULL) {
error = dsl_dataset_rollback(fsname, target, zvol_tag(zv),
outnvl);
zvol_resume(zv);
} else {
error = dsl_dataset_rollback(fsname, target, NULL, outnvl);
}
return (error);
}
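/*
* Illustrative sketch, not part of the surrounding code: a rollback that
* insists on a particular target snapshot passes it in the innvl, e.g.
*
*     nvlist_t *innvl = fnvlist_alloc();
*
*     fnvlist_add_string(innvl, "target", "pool/fs@mostrecent");
*
* The snapshot name is a placeholder. On success the outnvl carries
* "target" -> the name of the snapshot actually rolled back to, as noted
* in the comment above zfs_keys_rollback.
*/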
static int
recursive_unmount(const char *fsname, void *arg)
{
const char *snapname = arg;
char *fullname;
fullname = kmem_asprintf("%s@%s", fsname, snapname);
zfs_unmount_snap(fullname);
kmem_strfree(fullname);
return (0);
}
/*
*
* snapname is the snapshot to redact.
* innvl: {
* "bookname" -> (string)
* shortname of the redaction bookmark to generate
* "snapnv" -> (nvlist, values ignored)
* snapshots to redact snapname with respect to
* }
*
* outnvl is unused
*/
static const zfs_ioc_key_t zfs_keys_redact[] = {
{"bookname", DATA_TYPE_STRING, 0},
{"snapnv", DATA_TYPE_NVLIST, 0},
};
static int
zfs_ioc_redact(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) outnvl;
nvlist_t *redactnvl = NULL;
char *redactbook = NULL;
if (nvlist_lookup_nvlist(innvl, "snapnv", &redactnvl) != 0)
return (SET_ERROR(EINVAL));
if (fnvlist_num_pairs(redactnvl) == 0)
return (SET_ERROR(ENXIO));
if (nvlist_lookup_string(innvl, "bookname", &redactbook) != 0)
return (SET_ERROR(EINVAL));
return (dmu_redact_snap(snapname, redactnvl, redactbook));
}
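/*
* Illustrative sketch, not part of the surrounding code: redacting
* pool/fs@snap with respect to snapshots of two clones could use
*
*     nvlist_t *snapnv = fnvlist_alloc();
*     nvlist_t *innvl = fnvlist_alloc();
*
*     fnvlist_add_boolean(snapnv, "pool/clone1@snap");
*     fnvlist_add_boolean(snapnv, "pool/clone2@snap");
*     fnvlist_add_string(innvl, "bookname", "mybook");
*     fnvlist_add_nvlist(innvl, "snapnv", snapnv);
*     fnvlist_free(snapnv);
*
* All names here are placeholders; only the keys of "snapnv" matter, since
* its values are ignored, as noted in the comment above.
*/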
/*
* inputs:
* zc_name old name of dataset
* zc_value new name of dataset
* zc_cookie recursive flag (only valid for snapshots)
*
* outputs: none
*/
static int
zfs_ioc_rename(zfs_cmd_t *zc)
{
objset_t *os;
dmu_objset_type_t ost;
boolean_t recursive = zc->zc_cookie & 1;
boolean_t nounmount = !!(zc->zc_cookie & 2);
char *at;
int err;
/* "zfs rename" from and to ...%recv datasets should both fail */
zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
zc->zc_value[sizeof (zc->zc_value) - 1] = '\0';
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
strchr(zc->zc_name, '%') || strchr(zc->zc_value, '%'))
return (SET_ERROR(EINVAL));
err = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (err != 0)
return (err);
ost = dmu_objset_type(os);
dmu_objset_rele(os, FTAG);
at = strchr(zc->zc_name, '@');
if (at != NULL) {
/* snaps must be in same fs */
int error;
if (strncmp(zc->zc_name, zc->zc_value, at - zc->zc_name + 1))
return (SET_ERROR(EXDEV));
*at = '\0';
if (ost == DMU_OST_ZFS && !nounmount) {
error = dmu_objset_find(zc->zc_name,
recursive_unmount, at + 1,
recursive ? DS_FIND_CHILDREN : 0);
if (error != 0) {
*at = '@';
return (error);
}
}
error = dsl_dataset_rename_snapshot(zc->zc_name,
at + 1, strchr(zc->zc_value, '@') + 1, recursive);
*at = '@';
return (error);
} else {
return (dsl_dir_rename(zc->zc_name, zc->zc_value));
}
}
static int
zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
{
const char *propname = nvpair_name(pair);
boolean_t issnap = (strchr(dsname, '@') != NULL);
zfs_prop_t prop = zfs_name_to_prop(propname);
uint64_t intval, compval;
int err;
if (prop == ZPROP_USERPROP) {
if (zfs_prop_user(propname)) {
if ((err = zfs_secpolicy_write_perms(dsname,
ZFS_DELEG_PERM_USERPROP, cr)))
return (err);
return (0);
}
if (!issnap && zfs_prop_userquota(propname)) {
const char *perm = NULL;
const char *uq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA];
const char *gq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA];
const char *uiq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA];
const char *giq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA];
const char *pq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA];
const char *piq_prefix = zfs_userquota_prop_prefixes[\
ZFS_PROP_PROJECTOBJQUOTA];
if (strncmp(propname, uq_prefix,
strlen(uq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_USERQUOTA;
} else if (strncmp(propname, uiq_prefix,
strlen(uiq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_USEROBJQUOTA;
} else if (strncmp(propname, gq_prefix,
strlen(gq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_GROUPQUOTA;
} else if (strncmp(propname, giq_prefix,
strlen(giq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_GROUPOBJQUOTA;
} else if (strncmp(propname, pq_prefix,
strlen(pq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_PROJECTQUOTA;
} else if (strncmp(propname, piq_prefix,
strlen(piq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_PROJECTOBJQUOTA;
} else {
/* {USER|GROUP|PROJECT}USED are read-only */
return (SET_ERROR(EINVAL));
}
if ((err = zfs_secpolicy_write_perms(dsname, perm, cr)))
return (err);
return (0);
}
return (SET_ERROR(EINVAL));
}
if (issnap)
return (SET_ERROR(EINVAL));
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
/*
* dsl_prop_get_all_impl() returns properties in this
* format.
*/
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) == 0);
}
/*
* Check that this value is valid for this pool version
*/
switch (prop) {
case ZFS_PROP_COMPRESSION:
/*
* If the user specified gzip compression, make sure
* the SPA supports it. We ignore any errors here since
* we'll catch them later.
*/
if (nvpair_value_uint64(pair, &intval) == 0) {
compval = ZIO_COMPRESS_ALGO(intval);
if (compval >= ZIO_COMPRESS_GZIP_1 &&
compval <= ZIO_COMPRESS_GZIP_9 &&
zfs_earlier_version(dsname,
SPA_VERSION_GZIP_COMPRESSION)) {
return (SET_ERROR(ENOTSUP));
}
if (compval == ZIO_COMPRESS_ZLE &&
zfs_earlier_version(dsname,
SPA_VERSION_ZLE_COMPRESSION))
return (SET_ERROR(ENOTSUP));
if (compval == ZIO_COMPRESS_LZ4) {
spa_t *spa;
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
if (compval == ZIO_COMPRESS_ZSTD) {
spa_t *spa;
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_ZSTD_COMPRESS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
}
break;
case ZFS_PROP_COPIES:
if (zfs_earlier_version(dsname, SPA_VERSION_DITTO_BLOCKS))
return (SET_ERROR(ENOTSUP));
break;
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
/* Record sizes above 128k need the feature to be enabled */
if (nvpair_value_uint64(pair, &intval) == 0 &&
intval > SPA_OLD_MAXBLOCKSIZE) {
spa_t *spa;
/*
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
if (intval > zfs_max_recordsize ||
intval > SPA_MAXBLOCKSIZE)
return (SET_ERROR(ERANGE));
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_LARGE_BLOCKS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
break;
case ZFS_PROP_DNODESIZE:
/* Dnode sizes above 512 need the feature to be enabled */
if (nvpair_value_uint64(pair, &intval) == 0 &&
intval != ZFS_DNSIZE_LEGACY) {
spa_t *spa;
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_LARGE_DNODE)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
break;
case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
/*
* This property could require the allocation classes
* feature to be active for setting; however, we allow
* it so that tests of settable properties succeed.
* The CLI will issue a warning in this case.
*/
break;
case ZFS_PROP_SHARESMB:
if (zpl_earlier_version(dsname, ZPL_VERSION_FUID))
return (SET_ERROR(ENOTSUP));
break;
case ZFS_PROP_ACLINHERIT:
if (nvpair_type(pair) == DATA_TYPE_UINT64 &&
nvpair_value_uint64(pair, &intval) == 0) {
if (intval == ZFS_ACL_PASSTHROUGH_X &&
zfs_earlier_version(dsname,
SPA_VERSION_PASSTHROUGH_X))
return (SET_ERROR(ENOTSUP));
}
break;
case ZFS_PROP_CHECKSUM:
case ZFS_PROP_DEDUP:
{
spa_feature_t feature;
spa_t *spa;
int err;
/* dedup feature version checks */
if (prop == ZFS_PROP_DEDUP &&
zfs_earlier_version(dsname, SPA_VERSION_DEDUP))
return (SET_ERROR(ENOTSUP));
if (nvpair_type(pair) == DATA_TYPE_UINT64 &&
nvpair_value_uint64(pair, &intval) == 0) {
/* check prop value is enabled in features */
feature = zio_checksum_to_feature(
intval & ZIO_CHECKSUM_MASK);
if (feature == SPA_FEATURE_NONE)
break;
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa, feature)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
break;
}
default:
break;
}
return (zfs_secpolicy_setprop(dsname, prop, pair, CRED()));
}
/*
* Removes properties from the given props list that fail permission checks
* needed to clear them and to restore them in case of a receive error. For each
* property, make sure we have both set and inherit permissions.
*
* Returns the first error encountered if any permission checks fail. If the
* caller provides a non-NULL errlist, it also gives the complete list of names
* of all the properties that failed a permission check along with the
* corresponding error numbers. The caller is responsible for freeing the
* returned errlist.
*
* If every property checks out successfully, zero is returned and the list
* pointed at by errlist is NULL.
*/
static int
zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
{
zfs_cmd_t *zc;
nvpair_t *pair, *next_pair;
nvlist_t *errors;
int err, rv = 0;
if (props == NULL)
return (0);
VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0);
zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name));
pair = nvlist_next_nvpair(props, NULL);
while (pair != NULL) {
next_pair = nvlist_next_nvpair(props, pair);
(void) strlcpy(zc->zc_value, nvpair_name(pair),
sizeof (zc->zc_value));
if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 ||
(err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) {
VERIFY(nvlist_remove_nvpair(props, pair) == 0);
VERIFY(nvlist_add_int32(errors,
zc->zc_value, err) == 0);
}
pair = next_pair;
}
kmem_free(zc, sizeof (zfs_cmd_t));
if ((pair = nvlist_next_nvpair(errors, NULL)) == NULL) {
nvlist_free(errors);
errors = NULL;
} else {
VERIFY(nvpair_value_int32(pair, &rv) == 0);
}
if (errlist == NULL)
nvlist_free(errors);
else
*errlist = errors;
return (rv);
}
static boolean_t
propval_equals(nvpair_t *p1, nvpair_t *p2)
{
if (nvpair_type(p1) == DATA_TYPE_NVLIST) {
/* dsl_prop_get_all_impl() format */
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(p1, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&p1) == 0);
}
if (nvpair_type(p2) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(p2, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&p2) == 0);
}
if (nvpair_type(p1) != nvpair_type(p2))
return (B_FALSE);
if (nvpair_type(p1) == DATA_TYPE_STRING) {
char *valstr1, *valstr2;
VERIFY(nvpair_value_string(p1, (char **)&valstr1) == 0);
VERIFY(nvpair_value_string(p2, (char **)&valstr2) == 0);
return (strcmp(valstr1, valstr2) == 0);
} else {
uint64_t intval1, intval2;
VERIFY(nvpair_value_uint64(p1, &intval1) == 0);
VERIFY(nvpair_value_uint64(p2, &intval2) == 0);
return (intval1 == intval2);
}
}
/*
* Remove properties from props if they are not going to change (as determined
* by comparison with origprops). Remove them from origprops as well, since we
* do not need to clear or restore properties that won't change.
*/
static void
props_reduce(nvlist_t *props, nvlist_t *origprops)
{
nvpair_t *pair, *next_pair;
if (origprops == NULL)
return; /* all props need to be received */
pair = nvlist_next_nvpair(props, NULL);
while (pair != NULL) {
const char *propname = nvpair_name(pair);
nvpair_t *match;
next_pair = nvlist_next_nvpair(props, pair);
if ((nvlist_lookup_nvpair(origprops, propname,
&match) != 0) || !propval_equals(pair, match))
goto next; /* need to set received value */
/* don't clear the existing received value */
(void) nvlist_remove_nvpair(origprops, match);
/* don't bother receiving the property */
(void) nvlist_remove_nvpair(props, pair);
next:
pair = next_pair;
}
}
/*
* Extract properties that cannot be set PRIOR to the receipt of a dataset.
* For example, refquota cannot be set until after the receipt of a dataset,
* because in replication streams, an older/earlier snapshot may exceed the
* refquota. We want to receive the older/earlier snapshot, but setting
* refquota pre-receipt will set the dsl's ACTUAL quota, which will prevent
* the older/earlier snapshot from being received (with EDQUOT).
*
* The ZFS test "zfs_receive_011_pos" demonstrates such a scenario.
*
* libzfs will need to be judicious in handling errors encountered for
* the props extracted by this function.
*/
static nvlist_t *
extract_delay_props(nvlist_t *props)
{
nvlist_t *delayprops;
nvpair_t *nvp, *tmp;
static const zfs_prop_t delayable[] = {
ZFS_PROP_REFQUOTA,
ZFS_PROP_KEYLOCATION,
+ /*
+ * Setting ZFS_PROP_SHARESMB requires the objset type to be
+ * known, which is not possible prior to receipt of raw sends.
+ */
+ ZFS_PROP_SHARESMB,
0
};
int i;
VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(props, nvp)) {
/*
* strcmp() is safe because zfs_prop_to_name() always returns
* a bounded string.
*/
for (i = 0; delayable[i] != 0; i++) {
if (strcmp(zfs_prop_to_name(delayable[i]),
nvpair_name(nvp)) == 0) {
break;
}
}
if (delayable[i] != 0) {
tmp = nvlist_prev_nvpair(props, nvp);
VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0);
VERIFY(nvlist_remove_nvpair(props, nvp) == 0);
nvp = tmp;
}
}
if (nvlist_empty(delayprops)) {
nvlist_free(delayprops);
delayprops = NULL;
}
return (delayprops);
}
static void
zfs_allow_log_destroy(void *arg)
{
char *poolname = arg;
if (poolname != NULL)
kmem_strfree(poolname);
}
#ifdef ZFS_DEBUG
static boolean_t zfs_ioc_recv_inject_err;
#endif
/*
* nvlist 'errors' is always allocated. It will contain descriptions of
* encountered errors, if any. It is the caller's responsibility to free it.
*/
static int
zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin, nvlist_t *recvprops,
nvlist_t *localprops, nvlist_t *hidden_args, boolean_t force,
boolean_t heal, boolean_t resumable, int input_fd,
dmu_replay_record_t *begin_record, uint64_t *read_bytes,
uint64_t *errflags, nvlist_t **errors)
{
dmu_recv_cookie_t drc;
int error = 0;
int props_error = 0;
offset_t off, noff;
nvlist_t *local_delayprops = NULL;
nvlist_t *recv_delayprops = NULL;
+ nvlist_t *inherited_delayprops = NULL;
nvlist_t *origprops = NULL; /* existing properties */
nvlist_t *origrecvd = NULL; /* existing received properties */
boolean_t first_recvd_props = B_FALSE;
boolean_t tofs_was_redacted;
zfs_file_t *input_fp;
*read_bytes = 0;
*errflags = 0;
*errors = fnvlist_alloc();
off = 0;
if ((input_fp = zfs_file_get(input_fd)) == NULL)
return (SET_ERROR(EBADF));
noff = off = zfs_file_off(input_fp);
error = dmu_recv_begin(tofs, tosnap, begin_record, force, heal,
resumable, localprops, hidden_args, origin, &drc, input_fp,
&off);
if (error != 0)
goto out;
tofs_was_redacted = dsl_get_redacted(drc.drc_ds);
/*
* Set properties before we receive the stream so that they are applied
* to the new data. Note that we must call dmu_recv_stream() if
* dmu_recv_begin() succeeds.
*/
if (recvprops != NULL && !drc.drc_newfs) {
if (spa_version(dsl_dataset_get_spa(drc.drc_ds)) >=
SPA_VERSION_RECVD_PROPS &&
!dsl_prop_get_hasrecvd(tofs))
first_recvd_props = B_TRUE;
/*
* If new received properties are supplied, they are to
* completely replace the existing received properties,
* so stash away the existing ones.
*/
if (dsl_prop_get_received(tofs, &origrecvd) == 0) {
nvlist_t *errlist = NULL;
/*
* Don't bother writing a property if its value won't
* change (and avoid the unnecessary security checks).
*
* The first receive after SPA_VERSION_RECVD_PROPS is a
* special case where we blow away all local properties
* regardless.
*/
if (!first_recvd_props)
props_reduce(recvprops, origrecvd);
if (zfs_check_clearable(tofs, origrecvd, &errlist) != 0)
(void) nvlist_merge(*errors, errlist, 0);
nvlist_free(errlist);
if (clear_received_props(tofs, origrecvd,
first_recvd_props ? NULL : recvprops) != 0)
*errflags |= ZPROP_ERR_NOCLEAR;
} else {
*errflags |= ZPROP_ERR_NOCLEAR;
}
}
/*
* Stash away existing properties so we can restore them on error unless
* we're doing the first receive after SPA_VERSION_RECVD_PROPS, in which
* case "origrecvd" will take care of that.
*/
if (localprops != NULL && !drc.drc_newfs && !first_recvd_props) {
objset_t *os;
if (dmu_objset_hold(tofs, FTAG, &os) == 0) {
if (dsl_prop_get_all(os, &origprops) != 0) {
*errflags |= ZPROP_ERR_NOCLEAR;
}
dmu_objset_rele(os, FTAG);
} else {
*errflags |= ZPROP_ERR_NOCLEAR;
}
}
if (recvprops != NULL) {
props_error = dsl_prop_set_hasrecvd(tofs);
if (props_error == 0) {
recv_delayprops = extract_delay_props(recvprops);
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED,
recvprops, *errors);
}
}
if (localprops != NULL) {
nvlist_t *oprops = fnvlist_alloc();
nvlist_t *xprops = fnvlist_alloc();
nvpair_t *nvp = NULL;
while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) {
if (nvpair_type(nvp) == DATA_TYPE_BOOLEAN) {
/* -x property */
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
if (prop != ZPROP_USERPROP) {
if (!zfs_prop_inheritable(prop))
continue;
} else if (!zfs_prop_user(name))
continue;
fnvlist_add_boolean(xprops, name);
} else {
/* -o property=value */
fnvlist_add_nvpair(oprops, nvp);
}
}
local_delayprops = extract_delay_props(oprops);
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL,
oprops, *errors);
+ inherited_delayprops = extract_delay_props(xprops);
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED,
xprops, *errors);
nvlist_free(oprops);
nvlist_free(xprops);
}
error = dmu_recv_stream(&drc, &off);
if (error == 0) {
zfsvfs_t *zfsvfs = NULL;
zvol_state_handle_t *zv = NULL;
if (getzfsvfs(tofs, &zfsvfs) == 0) {
/* online recv */
dsl_dataset_t *ds;
int end_err;
boolean_t stream_is_redacted = DMU_GET_FEATUREFLAGS(
begin_record->drr_u.drr_begin.
drr_versioninfo) & DMU_BACKUP_FEATURE_REDACTED;
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
/*
* If the suspend fails, then the recv_end will
* likely also fail, and clean up after itself.
*/
end_err = dmu_recv_end(&drc, zfsvfs);
/*
* If the dataset was not redacted, but we received a
* redacted stream onto it, we need to unmount the
* dataset. Otherwise, resume the filesystem.
*/
if (error == 0 && !drc.drc_newfs &&
stream_is_redacted && !tofs_was_redacted) {
error = zfs_end_fs(zfsvfs, ds);
} else if (error == 0) {
error = zfs_resume_fs(zfsvfs, ds);
}
error = error ? error : end_err;
zfs_vfs_rele(zfsvfs);
} else if ((zv = zvol_suspend(tofs)) != NULL) {
error = dmu_recv_end(&drc, zvol_tag(zv));
zvol_resume(zv);
} else {
error = dmu_recv_end(&drc, NULL);
}
/* Set delayed properties now, after we're done receiving. */
if (recv_delayprops != NULL && error == 0) {
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED,
recv_delayprops, *errors);
}
if (local_delayprops != NULL && error == 0) {
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL,
local_delayprops, *errors);
}
+ if (inherited_delayprops != NULL && error == 0) {
+ (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED,
+ inherited_delayprops, *errors);
+ }
}
/*
* Merge delayed props back in with initial props, in case
* we're DEBUG and zfs_ioc_recv_inject_err is set (which means
* we have to make sure clear_received_props() includes
* the delayed properties).
*
* Since zfs_ioc_recv_inject_err is only in DEBUG kernels,
* using ASSERT() will be just like a VERIFY.
*/
if (recv_delayprops != NULL) {
ASSERT(nvlist_merge(recvprops, recv_delayprops, 0) == 0);
nvlist_free(recv_delayprops);
}
if (local_delayprops != NULL) {
ASSERT(nvlist_merge(localprops, local_delayprops, 0) == 0);
nvlist_free(local_delayprops);
}
+ if (inherited_delayprops != NULL) {
+ ASSERT(nvlist_merge(localprops, inherited_delayprops, 0) == 0);
+ nvlist_free(inherited_delayprops);
+ }
*read_bytes = off - noff;
#ifdef ZFS_DEBUG
if (zfs_ioc_recv_inject_err) {
zfs_ioc_recv_inject_err = B_FALSE;
error = 1;
}
#endif
/*
* On error, restore the original props.
*/
if (error != 0 && recvprops != NULL && !drc.drc_newfs) {
if (clear_received_props(tofs, recvprops, NULL) != 0) {
/*
* We failed to clear the received properties.
* Since we may have left a $recvd value on the
* system, we can't clear the $hasrecvd flag.
*/
*errflags |= ZPROP_ERR_NORESTORE;
} else if (first_recvd_props) {
dsl_prop_unset_hasrecvd(tofs);
}
if (origrecvd == NULL && !drc.drc_newfs) {
/* We failed to stash the original properties. */
*errflags |= ZPROP_ERR_NORESTORE;
}
/*
* dsl_props_set() will not convert RECEIVED to LOCAL on or
* after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL
* explicitly if we're restoring local properties cleared in the
* first new-style receive.
*/
if (origrecvd != NULL &&
zfs_set_prop_nvlist(tofs, (first_recvd_props ?
ZPROP_SRC_LOCAL : ZPROP_SRC_RECEIVED),
origrecvd, NULL) != 0) {
/*
* We stashed the original properties but failed to
* restore them.
*/
*errflags |= ZPROP_ERR_NORESTORE;
}
}
if (error != 0 && localprops != NULL && !drc.drc_newfs &&
!first_recvd_props) {
nvlist_t *setprops;
nvlist_t *inheritprops;
nvpair_t *nvp;
if (origprops == NULL) {
/* We failed to stash the original properties. */
*errflags |= ZPROP_ERR_NORESTORE;
goto out;
}
/* Restore original props */
setprops = fnvlist_alloc();
inheritprops = fnvlist_alloc();
nvp = NULL;
while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
const char *source;
nvlist_t *attrs;
if (!nvlist_exists(origprops, name)) {
/*
* Property was not present or was explicitly
* inherited before the receive, restore this.
*/
fnvlist_add_boolean(inheritprops, name);
continue;
}
attrs = fnvlist_lookup_nvlist(origprops, name);
source = fnvlist_lookup_string(attrs, ZPROP_SOURCE);
/* Skip received properties */
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0)
continue;
if (strcmp(source, tofs) == 0) {
/* Property was locally set */
fnvlist_add_nvlist(setprops, name, attrs);
} else {
/* Property was implicitly inherited */
fnvlist_add_boolean(inheritprops, name);
}
}
if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL, setprops,
NULL) != 0)
*errflags |= ZPROP_ERR_NORESTORE;
if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED, inheritprops,
NULL) != 0)
*errflags |= ZPROP_ERR_NORESTORE;
nvlist_free(setprops);
nvlist_free(inheritprops);
}
out:
zfs_file_put(input_fp);
nvlist_free(origrecvd);
nvlist_free(origprops);
if (error == 0)
error = props_error;
return (error);
}
/*
* inputs:
* zc_name name of containing filesystem (unused)
* zc_nvlist_src{_size} nvlist of properties to apply
* zc_nvlist_conf{_size} nvlist of properties to exclude
* (DATA_TYPE_BOOLEAN) and override (everything else)
* zc_value name of snapshot to create
* zc_string name of clone origin (if DRR_FLAG_CLONE)
* zc_cookie file descriptor to recv from
* zc_begin_record the BEGIN record of the stream (not byteswapped)
* zc_guid force flag
*
* outputs:
* zc_cookie number of bytes read
* zc_obj zprop_errflags_t
* zc_nvlist_dst{_size} error for each unapplied received property
*/
static int
zfs_ioc_recv(zfs_cmd_t *zc)
{
dmu_replay_record_t begin_record;
nvlist_t *errors = NULL;
nvlist_t *recvdprops = NULL;
nvlist_t *localprops = NULL;
char *origin = NULL;
char *tosnap;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
int error = 0;
if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
strchr(zc->zc_value, '@') == NULL ||
strchr(zc->zc_value, '%'))
return (SET_ERROR(EINVAL));
(void) strlcpy(tofs, zc->zc_value, sizeof (tofs));
tosnap = strchr(tofs, '@');
*tosnap++ = '\0';
if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &recvdprops)) != 0)
return (error);
if (zc->zc_nvlist_conf != 0 &&
(error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &localprops)) != 0)
return (error);
if (zc->zc_string[0])
origin = zc->zc_string;
begin_record.drr_type = DRR_BEGIN;
begin_record.drr_payloadlen = 0;
begin_record.drr_u.drr_begin = zc->zc_begin_record;
error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvdprops, localprops,
NULL, zc->zc_guid, B_FALSE, B_FALSE, zc->zc_cookie, &begin_record,
&zc->zc_cookie, &zc->zc_obj, &errors);
nvlist_free(recvdprops);
nvlist_free(localprops);
/*
* Now that all props, initial and delayed, are set, report the prop
* errors to the caller.
*/
if (zc->zc_nvlist_dst_size != 0 && errors != NULL &&
(nvlist_smush(errors, zc->zc_nvlist_dst_size) != 0 ||
put_nvlist(zc, errors) != 0)) {
/*
* Caller made zc->zc_nvlist_dst less than the minimum expected
* size or supplied an invalid address.
*/
error = SET_ERROR(EINVAL);
}
nvlist_free(errors);
return (error);
}
/*
* innvl: {
* "snapname" -> full name of the snapshot to create
* (optional) "props" -> received properties to set (nvlist)
* (optional) "localprops" -> override and exclude properties (nvlist)
* (optional) "origin" -> name of clone origin (DRR_FLAG_CLONE)
* "begin_record" -> non-byteswapped dmu_replay_record_t
* "input_fd" -> file descriptor to read stream from (int32)
* (optional) "force" -> force flag (value ignored)
* (optional) "heal" -> use send stream to heal data corruption
* (optional) "resumable" -> resumable flag (value ignored)
* (optional) "cleanup_fd" -> unused
* (optional) "action_handle" -> unused
* (optional) "hidden_args" -> { "wkeydata" -> value }
* }
*
* outnvl: {
* "read_bytes" -> number of bytes read
* "error_flags" -> zprop_errflags_t
* "errors" -> error for each unapplied received property (nvlist)
* }
*/
static const zfs_ioc_key_t zfs_keys_recv_new[] = {
{"snapname", DATA_TYPE_STRING, 0},
{"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
{"localprops", DATA_TYPE_NVLIST, ZK_OPTIONAL},
{"origin", DATA_TYPE_STRING, ZK_OPTIONAL},
{"begin_record", DATA_TYPE_BYTE_ARRAY, 0},
{"input_fd", DATA_TYPE_INT32, 0},
{"force", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"heal", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"resumable", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"cleanup_fd", DATA_TYPE_INT32, ZK_OPTIONAL},
{"action_handle", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
};
static int
zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
dmu_replay_record_t *begin_record;
uint_t begin_record_size;
nvlist_t *errors = NULL;
nvlist_t *recvprops = NULL;
nvlist_t *localprops = NULL;
nvlist_t *hidden_args = NULL;
char *snapname;
char *origin = NULL;
char *tosnap;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
boolean_t force;
boolean_t heal;
boolean_t resumable;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
int input_fd = -1;
int error;
snapname = fnvlist_lookup_string(innvl, "snapname");
if (dataset_namecheck(snapname, NULL, NULL) != 0 ||
strchr(snapname, '@') == NULL ||
strchr(snapname, '%'))
return (SET_ERROR(EINVAL));
(void) strlcpy(tofs, snapname, sizeof (tofs));
tosnap = strchr(tofs, '@');
*tosnap++ = '\0';
error = nvlist_lookup_string(innvl, "origin", &origin);
if (error && error != ENOENT)
return (error);
error = nvlist_lookup_byte_array(innvl, "begin_record",
(uchar_t **)&begin_record, &begin_record_size);
if (error != 0 || begin_record_size != sizeof (*begin_record))
return (SET_ERROR(EINVAL));
input_fd = fnvlist_lookup_int32(innvl, "input_fd");
force = nvlist_exists(innvl, "force");
heal = nvlist_exists(innvl, "heal");
resumable = nvlist_exists(innvl, "resumable");
/* we still use "props" here for backwards compatibility */
error = nvlist_lookup_nvlist(innvl, "props", &recvprops);
if (error && error != ENOENT)
return (error);
error = nvlist_lookup_nvlist(innvl, "localprops", &localprops);
if (error && error != ENOENT)
return (error);
error = nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
if (error && error != ENOENT)
return (error);
error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvprops, localprops,
hidden_args, force, heal, resumable, input_fd, begin_record,
&read_bytes, &errflags, &errors);
fnvlist_add_uint64(outnvl, "read_bytes", read_bytes);
fnvlist_add_uint64(outnvl, "error_flags", errflags);
fnvlist_add_nvlist(outnvl, "errors", errors);
nvlist_free(errors);
nvlist_free(recvprops);
nvlist_free(localprops);
return (error);
}
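/*
* Illustrative sketch, not part of the surrounding code: the minimal innvl
* consumed by zfs_ioc_recv_new() above supplies the snapshot name, the raw
* BEGIN record and the stream file descriptor; everything else is optional.
* The snapshot name and the fd variable are placeholders.
*
*     dmu_replay_record_t drr;
*     nvlist_t *innvl;
*
*     (read the non-byteswapped BEGIN record of the stream into drr)
*
*     innvl = fnvlist_alloc();
*     fnvlist_add_string(innvl, "snapname", "pool/fs@received");
*     fnvlist_add_byte_array(innvl, "begin_record",
*         (uchar_t *)&drr, sizeof (drr));
*     fnvlist_add_int32(innvl, "input_fd", fd);
*     fnvlist_add_boolean(innvl, "resumable");
*
* The outnvl then reports "read_bytes", "error_flags" and the per-property
* "errors" nvlist, as documented above zfs_keys_recv_new.
*/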
typedef struct dump_bytes_io {
zfs_file_t *dbi_fp;
caddr_t dbi_buf;
int dbi_len;
int dbi_err;
} dump_bytes_io_t;
static void
dump_bytes_cb(void *arg)
{
dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
zfs_file_t *fp;
caddr_t buf;
fp = dbi->dbi_fp;
buf = dbi->dbi_buf;
dbi->dbi_err = zfs_file_write(fp, buf, dbi->dbi_len, NULL);
}
static int
dump_bytes(objset_t *os, void *buf, int len, void *arg)
{
dump_bytes_io_t dbi;
dbi.dbi_fp = arg;
dbi.dbi_buf = buf;
dbi.dbi_len = len;
#if defined(HAVE_LARGE_STACKS)
dump_bytes_cb(&dbi);
#else
/*
* The zfs_file_write() call is performed in a taskq to ensure that there is
* always enough stack space to write safely to the target filesystem.
* The ZIO_TYPE_FREE threads are used because there can be a lot of
* them and they are used in vdev_file.c for a similar purpose.
*/
spa_taskq_dispatch_sync(dmu_objset_spa(os), ZIO_TYPE_FREE,
ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
#endif /* HAVE_LARGE_STACKS */
return (dbi.dbi_err);
}
/*
* inputs:
* zc_name name of snapshot to send
* zc_cookie file descriptor to send stream to
* zc_obj fromorigin flag (mutually exclusive with zc_fromobj)
* zc_sendobj objsetid of snapshot to send
* zc_fromobj objsetid of incremental fromsnap (may be zero)
* zc_guid if set, estimate size of stream only. zc_cookie is ignored.
* output size in zc_objset_type.
* zc_flags lzc_send_flags
*
* outputs:
* zc_objset_type estimated size, if zc_guid is set
*
* NOTE: This is no longer the preferred interface; any new functionality
* should be added to zfs_ioc_send_new() instead.
*/
static int
zfs_ioc_send(zfs_cmd_t *zc)
{
int error;
offset_t off;
boolean_t estimate = (zc->zc_guid != 0);
boolean_t embedok = (zc->zc_flags & 0x1);
boolean_t large_block_ok = (zc->zc_flags & 0x2);
boolean_t compressok = (zc->zc_flags & 0x4);
boolean_t rawok = (zc->zc_flags & 0x8);
boolean_t savedok = (zc->zc_flags & 0x10);
if (zc->zc_obj != 0) {
dsl_pool_t *dp;
dsl_dataset_t *tosnap;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (dsl_dir_is_clone(tosnap->ds_dir))
zc->zc_fromobj =
dsl_dir_phys(tosnap->ds_dir)->dd_origin_obj;
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
}
if (estimate) {
dsl_pool_t *dp;
dsl_dataset_t *tosnap;
dsl_dataset_t *fromsnap = NULL;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, zc->zc_sendobj,
FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (zc->zc_fromobj != 0) {
error = dsl_dataset_hold_obj(dp, zc->zc_fromobj,
FTAG, &fromsnap);
if (error != 0) {
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
}
error = dmu_send_estimate_fast(tosnap, fromsnap, NULL,
compressok || rawok, savedok, &zc->zc_objset_type);
if (fromsnap != NULL)
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
} else {
zfs_file_t *fp;
dmu_send_outparams_t out = {0};
if ((fp = zfs_file_get(zc->zc_cookie)) == NULL)
return (SET_ERROR(EBADF));
off = zfs_file_off(fp);
out.dso_outfunc = dump_bytes;
out.dso_arg = fp;
out.dso_dryrun = B_FALSE;
error = dmu_send_obj(zc->zc_name, zc->zc_sendobj,
zc->zc_fromobj, embedok, large_block_ok, compressok,
rawok, savedok, zc->zc_cookie, &off, &out);
zfs_file_put(fp);
}
return (error);
}
/*
* inputs:
* zc_name name of snapshot on which to report progress
* zc_cookie file descriptor of send stream
*
* outputs:
* zc_cookie number of bytes written in send stream thus far
* zc_objset_type logical size of data traversed by send thus far
*/
static int
zfs_ioc_send_progress(zfs_cmd_t *zc)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
dmu_sendstatus_t *dsp = NULL;
int error;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
mutex_enter(&ds->ds_sendstream_lock);
/*
* Iterate over all the send streams currently active on this dataset.
* If there's one which matches the specified file descriptor _and_ the
* stream was started by the current process, return the progress of
* that stream.
*/
for (dsp = list_head(&ds->ds_sendstreams); dsp != NULL;
dsp = list_next(&ds->ds_sendstreams, dsp)) {
if (dsp->dss_outfd == zc->zc_cookie &&
zfs_proc_is_caller(dsp->dss_proc))
break;
}
if (dsp != NULL) {
zc->zc_cookie = atomic_cas_64((volatile uint64_t *)dsp->dss_off,
0, 0);
/* This is the closest thing we have to atomic_read_64. */
zc->zc_objset_type = atomic_cas_64(&dsp->dss_blocks, 0, 0);
} else {
error = SET_ERROR(ENOENT);
}
mutex_exit(&ds->ds_sendstream_lock);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
static int
zfs_ioc_inject_fault(zfs_cmd_t *zc)
{
int id, error;
error = zio_inject_fault(zc->zc_name, (int)zc->zc_guid, &id,
&zc->zc_inject_record);
if (error == 0)
zc->zc_guid = (uint64_t)id;
return (error);
}
static int
zfs_ioc_clear_fault(zfs_cmd_t *zc)
{
return (zio_clear_fault((int)zc->zc_guid));
}
static int
zfs_ioc_inject_list_next(zfs_cmd_t *zc)
{
int id = (int)zc->zc_guid;
int error;
error = zio_inject_list_next(&id, zc->zc_name, sizeof (zc->zc_name),
&zc->zc_inject_record);
zc->zc_guid = id;
return (error);
}
static int
zfs_ioc_error_log(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
uint64_t count = zc->zc_nvlist_dst_size;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
error = spa_get_errlog(spa, (void *)(uintptr_t)zc->zc_nvlist_dst,
&count);
if (error == 0)
zc->zc_nvlist_dst_size = count;
else
zc->zc_nvlist_dst_size = spa_get_errlog_size(spa);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_clear(zfs_cmd_t *zc)
{
spa_t *spa;
vdev_t *vd;
int error;
/*
* On zpool clear we also fix up missing slogs
*/
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(zc->zc_name);
if (spa == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EIO));
}
if (spa_get_log_state(spa) == SPA_LOG_MISSING) {
/* we need to let spa_open/spa_load clear the chains */
spa_set_log_state(spa, SPA_LOG_CLEAR);
}
spa->spa_last_open_failed = 0;
mutex_exit(&spa_namespace_lock);
if (zc->zc_cookie & ZPOOL_NO_REWIND) {
error = spa_open(zc->zc_name, &spa, FTAG);
} else {
nvlist_t *policy;
nvlist_t *config = NULL;
if (zc->zc_nvlist_src == 0)
return (SET_ERROR(EINVAL));
if ((error = get_nvlist(zc->zc_nvlist_src,
zc->zc_nvlist_src_size, zc->zc_iflags, &policy)) == 0) {
error = spa_open_rewind(zc->zc_name, &spa, FTAG,
policy, &config);
if (config != NULL) {
int err;
if ((err = put_nvlist(zc, config)) != 0)
error = err;
nvlist_free(config);
}
nvlist_free(policy);
}
}
if (error != 0)
return (error);
/*
* If multihost is enabled, resuming I/O is unsafe as another
* host may have imported the pool.
*/
if (spa_multihost(spa) && spa_suspended(spa))
return (SET_ERROR(EINVAL));
spa_vdev_state_enter(spa, SCL_NONE);
if (zc->zc_guid == 0) {
vd = NULL;
} else {
vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE);
if (vd == NULL) {
error = SET_ERROR(ENODEV);
(void) spa_vdev_state_exit(spa, NULL, error);
spa_close(spa, FTAG);
return (error);
}
}
vdev_clear(spa, vd);
(void) spa_vdev_state_exit(spa, spa_suspended(spa) ?
NULL : spa->spa_root_vdev, 0);
/*
* Resume any suspended I/Os.
*/
if (zio_resume(spa) != 0)
error = SET_ERROR(EIO);
spa_close(spa, FTAG);
return (error);
}
/*
* Reopen all the vdevs associated with the pool.
*
* innvl: {
* "scrub_restart" -> when true and scrub is running, allow to restart
* scrub as the side effect of the reopen (boolean).
* }
*
* outnvl is unused
*/
static const zfs_ioc_key_t zfs_keys_pool_reopen[] = {
{"scrub_restart", DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
};
static int
zfs_ioc_pool_reopen(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) outnvl;
spa_t *spa;
int error;
boolean_t rc, scrub_restart = B_TRUE;
if (innvl) {
error = nvlist_lookup_boolean_value(innvl,
"scrub_restart", &rc);
if (error == 0)
scrub_restart = rc;
}
error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
spa_vdev_state_enter(spa, SCL_NONE);
/*
* If the scrub_restart flag is B_FALSE and a scrub is already
* in progress, set the spa_scrub_reopen flag to B_TRUE so that
* we don't restart the scrub as a side effect of the reopen.
* Otherwise, let vdev_open() decide if a resilver is required.
*/
spa->spa_scrub_reopen = (!scrub_restart &&
dsl_scan_scrubbing(spa->spa_dsl_pool));
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
spa_close(spa, FTAG);
return (0);
}
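/*
* Illustrative sketch, not part of the surrounding code: to reopen the
* pool's vdevs without restarting an in-progress scrub, a caller passes
*
*     nvlist_t *innvl = fnvlist_alloc();
*
*     fnvlist_add_boolean_value(innvl, "scrub_restart", B_FALSE);
*
* With no innvl at all, or with "scrub_restart" absent, the handler above
* defaults scrub_restart to B_TRUE and a running scrub may be restarted.
*/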
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* zc_string name of conflicting snapshot, if there is one
*/
static int
zfs_ioc_promote(zfs_cmd_t *zc)
{
dsl_pool_t *dp;
dsl_dataset_t *ds, *ods;
char origin[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
int error;
zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
strchr(zc->zc_name, '%'))
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (!dsl_dir_is_clone(ds->ds_dir)) {
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &ods);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
dsl_dataset_name(ods, origin);
dsl_dataset_rele(ods, FTAG);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
/*
* We don't need to unmount *all* the origin fs's snapshots, but
* it's easier.
*/
cp = strchr(origin, '@');
if (cp)
*cp = '\0';
(void) dmu_objset_find(origin,
zfs_unmount_snap_cb, NULL, DS_FIND_SNAPSHOTS);
return (dsl_dataset_promote(zc->zc_name, zc->zc_string));
}
/*
* Retrieve a single {user|group|project}{used|quota}@... property.
*
* inputs:
* zc_name name of filesystem
* zc_objset_type zfs_userquota_prop_t
* zc_value domain name (eg. "S-1-234-567-89")
* zc_guid RID/UID/GID
*
* outputs:
* zc_cookie property value
*/
static int
zfs_ioc_userspace_one(zfs_cmd_t *zc)
{
zfsvfs_t *zfsvfs;
int error;
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0)
return (error);
error = zfs_userspace_one(zfsvfs,
zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_cookie zap cursor
* zc_objset_type zfs_userquota_prop_t
* zc_nvlist_dst[_size] buffer to fill (not really an nvlist)
*
* outputs:
* zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t)
* zc_cookie zap cursor
*/
static int
zfs_ioc_userspace_many(zfs_cmd_t *zc)
{
zfsvfs_t *zfsvfs;
int bufsize = zc->zc_nvlist_dst_size;
if (bufsize <= 0)
return (SET_ERROR(ENOMEM));
int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0)
return (error);
void *buf = vmem_alloc(bufsize, KM_SLEEP);
error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie,
buf, &zc->zc_nvlist_dst_size);
if (error == 0) {
error = xcopyout(buf,
(void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size);
}
vmem_free(buf, bufsize);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* none
*/
static int
zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
{
int error = 0;
zfsvfs_t *zfsvfs;
if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) {
if (!dmu_objset_userused_enabled(zfsvfs->z_os)) {
/*
* If userused is not enabled, it may be because the
* objset needs to be closed & reopened (to grow the
* objset_phys_t). Suspending and resuming the fs will do that.
*/
dsl_dataset_t *ds, *newds;
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
if (error == 0) {
dmu_objset_refresh_ownership(ds, &newds,
B_TRUE, zfsvfs);
error = zfs_resume_fs(zfsvfs, newds);
}
}
if (error == 0) {
mutex_enter(&zfsvfs->z_os->os_upgrade_lock);
if (zfsvfs->z_os->os_upgrade_id == 0) {
/* clear potential error code and retry */
zfsvfs->z_os->os_upgrade_status = 0;
mutex_exit(&zfsvfs->z_os->os_upgrade_lock);
dsl_pool_config_enter(
dmu_objset_pool(zfsvfs->z_os), FTAG);
dmu_objset_userspace_upgrade(zfsvfs->z_os);
dsl_pool_config_exit(
dmu_objset_pool(zfsvfs->z_os), FTAG);
} else {
mutex_exit(&zfsvfs->z_os->os_upgrade_lock);
}
taskq_wait_id(zfsvfs->z_os->os_spa->spa_upgrade_taskq,
zfsvfs->z_os->os_upgrade_id);
error = zfsvfs->z_os->os_upgrade_status;
}
zfs_vfs_rele(zfsvfs);
} else {
objset_t *os;
/* XXX kind of reading contents without owning */
error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os);
if (error != 0)
return (error);
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0) {
/* clear potential error code and retry */
os->os_upgrade_status = 0;
mutex_exit(&os->os_upgrade_lock);
dmu_objset_userspace_upgrade(os);
} else {
mutex_exit(&os->os_upgrade_lock);
}
dsl_pool_rele(dmu_objset_pool(os), FTAG);
taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id);
error = os->os_upgrade_status;
dsl_dataset_rele_flags(dmu_objset_ds(os), DS_HOLD_FLAG_DECRYPT,
FTAG);
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* none
*/
static int
zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc)
{
objset_t *os;
int error;
error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os);
if (error != 0)
return (error);
if (dmu_objset_userobjspace_upgradable(os) ||
dmu_objset_projectquota_upgradable(os)) {
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0) {
/* clear potential error code and retry */
os->os_upgrade_status = 0;
mutex_exit(&os->os_upgrade_lock);
dmu_objset_id_quota_upgrade(os);
} else {
mutex_exit(&os->os_upgrade_lock);
}
dsl_pool_rele(dmu_objset_pool(os), FTAG);
taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id);
error = os->os_upgrade_status;
} else {
dsl_pool_rele(dmu_objset_pool(os), FTAG);
}
dsl_dataset_rele_flags(dmu_objset_ds(os), DS_HOLD_FLAG_DECRYPT, FTAG);
return (error);
}
static int
zfs_ioc_share(zfs_cmd_t *zc)
{
return (SET_ERROR(ENOSYS));
}
/*
* inputs:
* zc_name name of containing filesystem
* zc_obj object # beyond which we want next in-use object #
*
* outputs:
* zc_obj next in-use object #
*/
static int
zfs_ioc_next_obj(zfs_cmd_t *zc)
{
objset_t *os = NULL;
int error;
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (error != 0)
return (error);
error = dmu_object_next(os, &zc->zc_obj, B_FALSE, 0);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_value prefix name for snapshot
* zc_cleanup_fd cleanup-on-exit file descriptor for calling process
*
* outputs:
* zc_value short name of new snapshot
*/
static int
zfs_ioc_tmp_snapshot(zfs_cmd_t *zc)
{
char *snap_name;
char *hold_name;
minor_t minor;
zfs_file_t *fp = zfs_onexit_fd_hold(zc->zc_cleanup_fd, &minor);
if (fp == NULL)
return (SET_ERROR(EBADF));
snap_name = kmem_asprintf("%s-%016llx", zc->zc_value,
(u_longlong_t)ddi_get_lbolt64());
hold_name = kmem_asprintf("%%%s", zc->zc_value);
int error = dsl_dataset_snapshot_tmp(zc->zc_name, snap_name, minor,
hold_name);
if (error == 0)
(void) strlcpy(zc->zc_value, snap_name,
sizeof (zc->zc_value));
kmem_strfree(snap_name);
kmem_strfree(hold_name);
zfs_onexit_fd_rele(fp);
return (error);
}
/*
* inputs:
* zc_name name of "to" snapshot
* zc_value name of "from" snapshot
* zc_cookie file descriptor to write diff data on
*
* outputs:
* dmu_diff_record_t's to the file descriptor
*/
static int
zfs_ioc_diff(zfs_cmd_t *zc)
{
zfs_file_t *fp;
offset_t off;
int error;
if ((fp = zfs_file_get(zc->zc_cookie)) == NULL)
return (SET_ERROR(EBADF));
off = zfs_file_off(fp);
error = dmu_diff(zc->zc_name, zc->zc_value, fp, &off);
zfs_file_put(fp);
return (error);
}
static int
zfs_ioc_smb_acl(zfs_cmd_t *zc)
{
return (SET_ERROR(ENOTSUP));
}
/*
* innvl: {
* "holds" -> { snapname -> holdname (string), ... }
* (optional) "cleanup_fd" -> fd (int32)
* }
*
* outnvl: {
* snapname -> error value (int32)
* ...
* }
*/
static const zfs_ioc_key_t zfs_keys_hold[] = {
{"holds", DATA_TYPE_NVLIST, 0},
{"cleanup_fd", DATA_TYPE_INT32, ZK_OPTIONAL},
};
static int
zfs_ioc_hold(const char *pool, nvlist_t *args, nvlist_t *errlist)
{
(void) pool;
nvpair_t *pair;
nvlist_t *holds;
int cleanup_fd = -1;
int error;
minor_t minor = 0;
zfs_file_t *fp = NULL;
holds = fnvlist_lookup_nvlist(args, "holds");
/* make sure the user didn't pass us any invalid (empty) tags */
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) {
char *htag;
error = nvpair_value_string(pair, &htag);
if (error != 0)
return (SET_ERROR(error));
if (strlen(htag) == 0)
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_int32(args, "cleanup_fd", &cleanup_fd) == 0) {
fp = zfs_onexit_fd_hold(cleanup_fd, &minor);
if (fp == NULL)
return (SET_ERROR(EBADF));
}
error = dsl_dataset_user_hold(holds, minor, errlist);
if (fp != NULL) {
ASSERT3U(minor, !=, 0);
zfs_onexit_fd_rele(fp);
}
return (SET_ERROR(error));
}
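/*
* Illustrative sketch, not part of the surrounding code: holding two
* snapshots under the tag "backup", with the holds tied to a cleanup fd so
* they are released when that descriptor is closed. All names and the
* cleanup_fd variable are placeholders.
*
*     nvlist_t *holds = fnvlist_alloc();
*     nvlist_t *innvl = fnvlist_alloc();
*
*     fnvlist_add_string(holds, "pool/fs@snap1", "backup");
*     fnvlist_add_string(holds, "pool/fs@snap2", "backup");
*     fnvlist_add_nvlist(innvl, "holds", holds);
*     fnvlist_add_int32(innvl, "cleanup_fd", cleanup_fd);
*     fnvlist_free(holds);
*
* The errlist maps each snapshot name to an error value, as documented
* above zfs_keys_hold.
*/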
/*
* innvl is not used.
*
* outnvl: {
* holdname -> time added (uint64 seconds since epoch)
* ...
* }
*/
static const zfs_ioc_key_t zfs_keys_get_holds[] = {
/* no nvl keys */
};
static int
zfs_ioc_get_holds(const char *snapname, nvlist_t *args, nvlist_t *outnvl)
{
(void) args;
return (dsl_dataset_get_holds(snapname, outnvl));
}
/*
* innvl: {
* snapname -> { holdname, ... }
* ...
* }
*
* outnvl: {
* snapname -> error value (int32)
* ...
* }
*/
static const zfs_ioc_key_t zfs_keys_release[] = {
{"<snapname>...", DATA_TYPE_NVLIST, ZK_WILDCARDLIST},
};
static int
zfs_ioc_release(const char *pool, nvlist_t *holds, nvlist_t *errlist)
{
(void) pool;
return (dsl_dataset_user_release(holds, errlist));
}
/*
* inputs:
* zc_guid flags (ZEVENT_NONBLOCK)
* zc_cleanup_fd zevent file descriptor
*
* outputs:
* zc_nvlist_dst next nvlist event
* zc_cookie dropped events since last get
*/
static int
zfs_ioc_events_next(zfs_cmd_t *zc)
{
zfs_zevent_t *ze;
nvlist_t *event = NULL;
minor_t minor;
uint64_t dropped = 0;
int error;
zfs_file_t *fp = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
if (fp == NULL)
return (SET_ERROR(EBADF));
do {
error = zfs_zevent_next(ze, &event,
&zc->zc_nvlist_dst_size, &dropped);
if (event != NULL) {
zc->zc_cookie = dropped;
error = put_nvlist(zc, event);
nvlist_free(event);
}
if (zc->zc_guid & ZEVENT_NONBLOCK)
break;
if ((error == 0) || (error != ENOENT))
break;
error = zfs_zevent_wait(ze);
if (error != 0)
break;
} while (1);
zfs_zevent_fd_rele(fp);
return (error);
}
/*
* outputs:
* zc_cookie cleared events count
*/
static int
zfs_ioc_events_clear(zfs_cmd_t *zc)
{
int count;
zfs_zevent_drain_all(&count);
zc->zc_cookie = count;
return (0);
}
/*
* inputs:
* zc_guid eid | ZEVENT_SEEK_START | ZEVENT_SEEK_END
* zc_cleanup_fd zevent file descriptor
*/
static int
zfs_ioc_events_seek(zfs_cmd_t *zc)
{
zfs_zevent_t *ze;
minor_t minor;
int error;
zfs_file_t *fp = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
if (fp == NULL)
return (SET_ERROR(EBADF));
error = zfs_zevent_seek(ze, zc->zc_guid);
zfs_zevent_fd_rele(fp);
return (error);
}
/*
* inputs:
* zc_name name of later filesystem or snapshot
* zc_value full name of old snapshot or bookmark
*
* outputs:
* zc_cookie space in bytes
* zc_objset_type compressed space in bytes
* zc_perm_action uncompressed space in bytes
*/
static int
zfs_ioc_space_written(zfs_cmd_t *zc)
{
int error;
dsl_pool_t *dp;
dsl_dataset_t *new;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &new);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (strchr(zc->zc_value, '#') != NULL) {
zfs_bookmark_phys_t bmp;
error = dsl_bookmark_lookup(dp, zc->zc_value,
new, &bmp);
if (error == 0) {
error = dsl_dataset_space_written_bookmark(&bmp, new,
&zc->zc_cookie,
&zc->zc_objset_type, &zc->zc_perm_action);
}
} else {
dsl_dataset_t *old;
error = dsl_dataset_hold(dp, zc->zc_value, FTAG, &old);
if (error == 0) {
error = dsl_dataset_space_written(old, new,
&zc->zc_cookie,
&zc->zc_objset_type, &zc->zc_perm_action);
dsl_dataset_rele(old, FTAG);
}
}
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
/*
* innvl: {
* "firstsnap" -> snapshot name
* }
*
* outnvl: {
* "used" -> space in bytes
* "compressed" -> compressed space in bytes
* "uncompressed" -> uncompressed space in bytes
* }
*/
static const zfs_ioc_key_t zfs_keys_space_snaps[] = {
{"firstsnap", DATA_TYPE_STRING, 0},
};
static int
zfs_ioc_space_snaps(const char *lastsnap, nvlist_t *innvl, nvlist_t *outnvl)
{
int error;
dsl_pool_t *dp;
dsl_dataset_t *new, *old;
char *firstsnap;
uint64_t used, comp, uncomp;
firstsnap = fnvlist_lookup_string(innvl, "firstsnap");
error = dsl_pool_hold(lastsnap, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, lastsnap, FTAG, &new);
if (error == 0 && !new->ds_is_snapshot) {
dsl_dataset_rele(new, FTAG);
error = SET_ERROR(EINVAL);
}
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_hold(dp, firstsnap, FTAG, &old);
if (error == 0 && !old->ds_is_snapshot) {
dsl_dataset_rele(old, FTAG);
error = SET_ERROR(EINVAL);
}
if (error != 0) {
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_space_wouldfree(old, new, &used, &comp, &uncomp);
dsl_dataset_rele(old, FTAG);
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
fnvlist_add_uint64(outnvl, "used", used);
fnvlist_add_uint64(outnvl, "compressed", comp);
fnvlist_add_uint64(outnvl, "uncompressed", uncomp);
return (error);
}
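/*
* Illustrative sketch, not part of the surrounding code: estimating the
* space that would be freed by destroying the range of snapshots from
* "firstsnap" up to the lastsnap this ioctl is addressed to. The snapshot
* name is a placeholder.
*
*     nvlist_t *innvl = fnvlist_alloc();
*
*     fnvlist_add_string(innvl, "firstsnap", "pool/fs@first");
*
* On success the outnvl carries the "used", "compressed" and "uncompressed"
* byte counts computed by zfs_ioc_space_snaps() above.
*/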
/*
* innvl: {
* "fd" -> file descriptor to write stream to (int32)
* (optional) "fromsnap" -> full snap name to send an incremental from
* (optional) "largeblockok" -> (value ignored)
* indicates that blocks > 128KB are permitted
* (optional) "embedok" -> (value ignored)
* presence indicates DRR_WRITE_EMBEDDED records are permitted
* (optional) "compressok" -> (value ignored)
* presence indicates compressed DRR_WRITE records are permitted
* (optional) "rawok" -> (value ignored)
* presence indicates raw encrypted records should be used.
* (optional) "savedok" -> (value ignored)
* presence indicates we should send a partially received snapshot
* (optional) "resume_object" and "resume_offset" -> (uint64)
* if present, resume send stream from specified object and offset.
* (optional) "redactbook" -> (string)
* if present, use this bookmark's redaction list to generate a redacted
* send stream
* }
*
* outnvl is unused
*/
static const zfs_ioc_key_t zfs_keys_send_new[] = {
{"fd", DATA_TYPE_INT32, 0},
{"fromsnap", DATA_TYPE_STRING, ZK_OPTIONAL},
{"largeblockok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"embedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"compressok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"rawok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"savedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"resume_object", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"resume_offset", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"redactbook", DATA_TYPE_STRING, ZK_OPTIONAL},
};
static int
zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) outnvl;
int error;
offset_t off;
char *fromname = NULL;
int fd;
zfs_file_t *fp;
boolean_t largeblockok;
boolean_t embedok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t resumeobj = 0;
uint64_t resumeoff = 0;
char *redactbook = NULL;
fd = fnvlist_lookup_int32(innvl, "fd");
(void) nvlist_lookup_string(innvl, "fromsnap", &fromname);
largeblockok = nvlist_exists(innvl, "largeblockok");
embedok = nvlist_exists(innvl, "embedok");
compressok = nvlist_exists(innvl, "compressok");
rawok = nvlist_exists(innvl, "rawok");
savedok = nvlist_exists(innvl, "savedok");
(void) nvlist_lookup_uint64(innvl, "resume_object", &resumeobj);
(void) nvlist_lookup_uint64(innvl, "resume_offset", &resumeoff);
(void) nvlist_lookup_string(innvl, "redactbook", &redactbook);
if ((fp = zfs_file_get(fd)) == NULL)
return (SET_ERROR(EBADF));
off = zfs_file_off(fp);
dmu_send_outparams_t out = {0};
out.dso_outfunc = dump_bytes;
out.dso_arg = fp;
out.dso_dryrun = B_FALSE;
error = dmu_send(snapname, fromname, embedok, largeblockok,
compressok, rawok, savedok, resumeobj, resumeoff,
redactbook, fd, &off, &out);
zfs_file_put(fp);
return (error);
}
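Illustrative aside (not part of this diff): a hedged sketch of how a caller might assemble the innvl documented above, using libnvpair's fnvlist helpers; the boolean keys are pure presence flags, so fnvlist_add_boolean() is enough. The helper name below is hypothetical.
static nvlist_t *
example_send_new_args(int fd, const char *fromsnap)
{
	nvlist_t *innvl = fnvlist_alloc();
	fnvlist_add_int32(innvl, "fd", fd);
	if (fromsnap != NULL)
		fnvlist_add_string(innvl, "fromsnap", fromsnap);
	fnvlist_add_boolean(innvl, "largeblockok");	/* presence == permitted */
	fnvlist_add_boolean(innvl, "compressok");
	return (innvl);	/* hand to ZFS_IOC_SEND_NEW, then fnvlist_free() it */
}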
static int
send_space_sum(objset_t *os, void *buf, int len, void *arg)
{
(void) os, (void) buf;
uint64_t *size = arg;
*size += len;
return (0);
}
/*
* Determine approximately how large a zfs send stream will be -- the number
* of bytes that will be written to the fd supplied to zfs_ioc_send_new().
*
* innvl: {
* (optional) "from" -> full snap or bookmark name to send an incremental
* from
* (optional) "largeblockok" -> (value ignored)
* indicates that blocks > 128KB are permitted
* (optional) "embedok" -> (value ignored)
* presence indicates DRR_WRITE_EMBEDDED records are permitted
* (optional) "compressok" -> (value ignored)
* presence indicates compressed DRR_WRITE records are permitted
* (optional) "rawok" -> (value ignored)
* presence indicates raw encrypted records should be used.
* (optional) "resume_object" and "resume_offset" -> (uint64)
* if present, resume send stream from specified object and offset.
* (optional) "fd" -> file descriptor to use as a cookie for progress
* tracking (int32)
* }
*
* outnvl: {
* "space" -> bytes of space (uint64)
* }
*/
static const zfs_ioc_key_t zfs_keys_send_space[] = {
{"from", DATA_TYPE_STRING, ZK_OPTIONAL},
{"fromsnap", DATA_TYPE_STRING, ZK_OPTIONAL},
{"largeblockok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"embedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"compressok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"rawok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
{"fd", DATA_TYPE_INT32, ZK_OPTIONAL},
{"redactbook", DATA_TYPE_STRING, ZK_OPTIONAL},
{"resume_object", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"resume_offset", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"bytes", DATA_TYPE_UINT64, ZK_OPTIONAL},
};
static int
zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
{
dsl_pool_t *dp;
dsl_dataset_t *tosnap;
dsl_dataset_t *fromsnap = NULL;
int error;
char *fromname = NULL;
char *redactlist_book = NULL;
boolean_t largeblockok;
boolean_t embedok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t space = 0;
boolean_t full_estimate = B_FALSE;
uint64_t resumeobj = 0;
uint64_t resumeoff = 0;
uint64_t resume_bytes = 0;
int32_t fd = -1;
zfs_bookmark_phys_t zbm = {0};
error = dsl_pool_hold(snapname, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, snapname, FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
(void) nvlist_lookup_int32(innvl, "fd", &fd);
largeblockok = nvlist_exists(innvl, "largeblockok");
embedok = nvlist_exists(innvl, "embedok");
compressok = nvlist_exists(innvl, "compressok");
rawok = nvlist_exists(innvl, "rawok");
savedok = nvlist_exists(innvl, "savedok");
boolean_t from = (nvlist_lookup_string(innvl, "from", &fromname) == 0);
boolean_t altbook = (nvlist_lookup_string(innvl, "redactbook",
&redactlist_book) == 0);
(void) nvlist_lookup_uint64(innvl, "resume_object", &resumeobj);
(void) nvlist_lookup_uint64(innvl, "resume_offset", &resumeoff);
(void) nvlist_lookup_uint64(innvl, "bytes", &resume_bytes);
if (altbook) {
full_estimate = B_TRUE;
} else if (from) {
if (strchr(fromname, '#')) {
error = dsl_bookmark_lookup(dp, fromname, tosnap, &zbm);
/*
* dsl_bookmark_lookup() will fail with EXDEV if
* the from-bookmark and tosnap are at the same txg.
* However, it's valid to do a send (and therefore,
* a send estimate) from and to the same time point,
* if the bookmark is redacted (the incremental send
* can change what's redacted on the target). In
* this case, dsl_bookmark_lookup() fills in zbm
* but returns EXDEV. Ignore this error.
*/
if (error == EXDEV && zbm.zbm_redaction_obj != 0 &&
zbm.zbm_guid ==
dsl_dataset_phys(tosnap)->ds_guid)
error = 0;
if (error != 0) {
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
if (zbm.zbm_redaction_obj != 0 || !(zbm.zbm_flags &
ZBM_FLAG_HAS_FBN)) {
full_estimate = B_TRUE;
}
} else if (strchr(fromname, '@')) {
error = dsl_dataset_hold(dp, fromname, FTAG, &fromsnap);
if (error != 0) {
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
if (!dsl_dataset_is_before(tosnap, fromsnap, 0)) {
full_estimate = B_TRUE;
dsl_dataset_rele(fromsnap, FTAG);
}
} else {
/*
* from is not properly formatted as a snapshot or
* bookmark
*/
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (SET_ERROR(EINVAL));
}
}
if (full_estimate) {
dmu_send_outparams_t out = {0};
offset_t off = 0;
out.dso_outfunc = send_space_sum;
out.dso_arg = &space;
out.dso_dryrun = B_TRUE;
/*
* We have to release these holds so dmu_send can take them. It
* will do all the error checking we need.
*/
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
error = dmu_send(snapname, fromname, embedok, largeblockok,
compressok, rawok, savedok, resumeobj, resumeoff,
redactlist_book, fd, &off, &out);
} else {
error = dmu_send_estimate_fast(tosnap, fromsnap,
(from && strchr(fromname, '#') != NULL ? &zbm : NULL),
compressok || rawok, savedok, &space);
space -= resume_bytes;
if (fromsnap != NULL)
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
}
fnvlist_add_uint64(outnvl, "space", space);
return (error);
}
/*
* Sync the currently open TXG to disk for the specified pool.
* This is somewhat similar to 'zfs_sync()'.
* For cases that do not result in error this ioctl will wait for
* the currently open TXG to commit before returning back to the caller.
*
* innvl: {
* "force" -> when true, force uberblock update even if there is no dirty data.
* In addition, this will cause the vdev configuration to be written
* out, including updating the zpool cache file. (boolean_t)
* }
*
* onvl is unused
*/
static const zfs_ioc_key_t zfs_keys_pool_sync[] = {
{"force", DATA_TYPE_BOOLEAN_VALUE, 0},
};
static int
zfs_ioc_pool_sync(const char *pool, nvlist_t *innvl, nvlist_t *onvl)
{
(void) onvl;
int err;
boolean_t rc, force = B_FALSE;
spa_t *spa;
if ((err = spa_open(pool, &spa, FTAG)) != 0)
return (err);
if (innvl) {
err = nvlist_lookup_boolean_value(innvl, "force", &rc);
if (err == 0)
force = rc;
}
if (force) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
txg_wait_synced(spa_get_dsl(spa), 0);
spa_close(spa, FTAG);
return (0);
}
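Illustrative aside (not part of this diff): because "force" is declared as DATA_TYPE_BOOLEAN_VALUE above, a caller must attach an explicit boolean_t rather than a presence-only flag. A minimal sketch using libnvpair:
	nvlist_t *innvl = fnvlist_alloc();
	fnvlist_add_boolean_value(innvl, "force", B_TRUE);
	/* hand innvl to ZFS_IOC_POOL_SYNC for the pool, then free it */
	fnvlist_free(innvl);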
/*
* Load a user's wrapping key into the kernel.
* innvl: {
* "hidden_args" -> { "wkeydata" -> value }
* raw uint8_t array of encryption wrapping key data (32 bytes)
* (optional) "noop" -> (value ignored)
* presence indicates the key should only be verified, not loaded
* }
*/
static const zfs_ioc_key_t zfs_keys_load_key[] = {
{"hidden_args", DATA_TYPE_NVLIST, 0},
{"noop", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
};
static int
zfs_ioc_load_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) outnvl;
int ret;
dsl_crypto_params_t *dcp = NULL;
nvlist_t *hidden_args;
boolean_t noop = nvlist_exists(innvl, "noop");
if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
hidden_args = fnvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS);
ret = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
hidden_args, &dcp);
if (ret != 0)
goto error;
ret = spa_keystore_load_wkey(dsname, dcp, noop);
if (ret != 0)
goto error;
dsl_crypto_params_free(dcp, noop);
return (0);
error:
dsl_crypto_params_free(dcp, B_TRUE);
return (ret);
}
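Illustrative aside (not part of this diff): the "hidden_args" wrapper keeps the raw wrapping key out of the logged input nvlist. A hedged sketch of how a caller might package it (the 32-byte length follows the comment above):
	uint8_t wkeydata[32];			/* raw wrapping key */
	nvlist_t *hidden_args = fnvlist_alloc();
	nvlist_t *innvl = fnvlist_alloc();
	/* ... derive wkeydata from the user's keyformat/keylocation ... */
	fnvlist_add_uint8_array(hidden_args, "wkeydata", wkeydata,
	    sizeof (wkeydata));
	fnvlist_add_nvlist(innvl, "hidden_args", hidden_args);
	fnvlist_add_boolean(innvl, "noop");	/* verify only, don't load */
	/* hand innvl to ZFS_IOC_LOAD_KEY for the dataset */
	fnvlist_free(hidden_args);
	fnvlist_free(innvl);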
/*
* Unload a user's wrapping key from the kernel.
* Both innvl and outnvl are unused.
*/
static const zfs_ioc_key_t zfs_keys_unload_key[] = {
/* no nvl keys */
};
static int
zfs_ioc_unload_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) innvl, (void) outnvl;
int ret = 0;
if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
ret = (SET_ERROR(EINVAL));
goto out;
}
ret = spa_keystore_unload_wkey(dsname);
if (ret != 0)
goto out;
out:
return (ret);
}
/*
* Changes a user's wrapping key used to decrypt a dataset. The keyformat,
* keylocation, pbkdf2salt, and pbkdf2iters properties can also be specified
* here to change how the key is derived in userspace.
*
* innvl: {
* "hidden_args" (optional) -> { "wkeydata" -> value }
* raw uint8_t array of new encryption wrapping key data (32 bytes)
* "props" (optional) -> { prop -> value }
* }
*
* outnvl is unused
*/
static const zfs_ioc_key_t zfs_keys_change_key[] = {
{"crypt_cmd", DATA_TYPE_UINT64, ZK_OPTIONAL},
{"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
{"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
};
static int
zfs_ioc_change_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
{
(void) outnvl;
int ret;
uint64_t cmd = DCP_CMD_NONE;
dsl_crypto_params_t *dcp = NULL;
nvlist_t *args = NULL, *hidden_args = NULL;
if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
ret = (SET_ERROR(EINVAL));
goto error;
}
(void) nvlist_lookup_uint64(innvl, "crypt_cmd", &cmd);
(void) nvlist_lookup_nvlist(innvl, "props", &args);
(void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
ret = dsl_crypto_params_create_nvlist(cmd, args, hidden_args, &dcp);
if (ret != 0)
goto error;
ret = spa_keystore_change_key(dsname, dcp);
if (ret != 0)
goto error;
dsl_crypto_params_free(dcp, B_FALSE);
return (0);
error:
dsl_crypto_params_free(dcp, B_TRUE);
return (ret);
}
static zfs_ioc_vec_t zfs_ioc_vec[ZFS_IOC_LAST - ZFS_IOC_FIRST];
static void
zfs_ioctl_register_legacy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck,
boolean_t log_history, zfs_ioc_poolcheck_t pool_check)
{
zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST];
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
ASSERT3P(vec->zvec_legacy_func, ==, NULL);
ASSERT3P(vec->zvec_func, ==, NULL);
vec->zvec_legacy_func = func;
vec->zvec_secpolicy = secpolicy;
vec->zvec_namecheck = namecheck;
vec->zvec_allow_log = log_history;
vec->zvec_pool_check = pool_check;
}
/*
* See the block comment at the beginning of this file for details on
* each argument to this function.
*/
void
zfs_ioctl_register(const char *name, zfs_ioc_t ioc, zfs_ioc_func_t *func,
zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck,
zfs_ioc_poolcheck_t pool_check, boolean_t smush_outnvlist,
boolean_t allow_log, const zfs_ioc_key_t *nvl_keys, size_t num_keys)
{
zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST];
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
ASSERT3P(vec->zvec_legacy_func, ==, NULL);
ASSERT3P(vec->zvec_func, ==, NULL);
/* if we are logging, the name must be valid */
ASSERT(!allow_log || namecheck != NO_NAME);
vec->zvec_name = name;
vec->zvec_func = func;
vec->zvec_secpolicy = secpolicy;
vec->zvec_namecheck = namecheck;
vec->zvec_pool_check = pool_check;
vec->zvec_smush_outnvlist = smush_outnvlist;
vec->zvec_allow_log = allow_log;
vec->zvec_nvl_keys = nvl_keys;
vec->zvec_nvl_key_count = num_keys;
}
static void
zfs_ioctl_register_pool(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy, boolean_t log_history,
zfs_ioc_poolcheck_t pool_check)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
POOL_NAME, log_history, pool_check);
}
void
zfs_ioctl_register_dataset_nolog(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy, zfs_ioc_poolcheck_t pool_check)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
DATASET_NAME, B_FALSE, pool_check);
}
static void
zfs_ioctl_register_pool_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func)
{
zfs_ioctl_register_legacy(ioc, func, zfs_secpolicy_config,
POOL_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
}
static void
zfs_ioctl_register_pool_meta(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
NO_NAME, B_FALSE, POOL_CHECK_NONE);
}
static void
zfs_ioctl_register_dataset_read_secpolicy(zfs_ioc_t ioc,
zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
DATASET_NAME, B_FALSE, POOL_CHECK_SUSPENDED);
}
static void
zfs_ioctl_register_dataset_read(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func)
{
zfs_ioctl_register_dataset_read_secpolicy(ioc, func,
zfs_secpolicy_read);
}
static void
zfs_ioctl_register_dataset_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
DATASET_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
}
static void
zfs_ioctl_init(void)
{
zfs_ioctl_register("snapshot", ZFS_IOC_SNAPSHOT,
zfs_ioc_snapshot, zfs_secpolicy_snapshot, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_snapshot, ARRAY_SIZE(zfs_keys_snapshot));
zfs_ioctl_register("log_history", ZFS_IOC_LOG_HISTORY,
zfs_ioc_log_history, zfs_secpolicy_log_history, NO_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
zfs_keys_log_history, ARRAY_SIZE(zfs_keys_log_history));
zfs_ioctl_register("space_snaps", ZFS_IOC_SPACE_SNAPS,
zfs_ioc_space_snaps, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
zfs_keys_space_snaps, ARRAY_SIZE(zfs_keys_space_snaps));
zfs_ioctl_register("send", ZFS_IOC_SEND_NEW,
zfs_ioc_send_new, zfs_secpolicy_send_new, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
zfs_keys_send_new, ARRAY_SIZE(zfs_keys_send_new));
zfs_ioctl_register("send_space", ZFS_IOC_SEND_SPACE,
zfs_ioc_send_space, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
zfs_keys_send_space, ARRAY_SIZE(zfs_keys_send_space));
zfs_ioctl_register("create", ZFS_IOC_CREATE,
zfs_ioc_create, zfs_secpolicy_create_clone, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_create, ARRAY_SIZE(zfs_keys_create));
zfs_ioctl_register("clone", ZFS_IOC_CLONE,
zfs_ioc_clone, zfs_secpolicy_create_clone, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_clone, ARRAY_SIZE(zfs_keys_clone));
zfs_ioctl_register("remap", ZFS_IOC_REMAP,
zfs_ioc_remap, zfs_secpolicy_none, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
zfs_keys_remap, ARRAY_SIZE(zfs_keys_remap));
zfs_ioctl_register("destroy_snaps", ZFS_IOC_DESTROY_SNAPS,
zfs_ioc_destroy_snaps, zfs_secpolicy_destroy_snaps, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_destroy_snaps, ARRAY_SIZE(zfs_keys_destroy_snaps));
zfs_ioctl_register("hold", ZFS_IOC_HOLD,
zfs_ioc_hold, zfs_secpolicy_hold, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_hold, ARRAY_SIZE(zfs_keys_hold));
zfs_ioctl_register("release", ZFS_IOC_RELEASE,
zfs_ioc_release, zfs_secpolicy_release, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_release, ARRAY_SIZE(zfs_keys_release));
zfs_ioctl_register("get_holds", ZFS_IOC_GET_HOLDS,
zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
zfs_keys_get_holds, ARRAY_SIZE(zfs_keys_get_holds));
zfs_ioctl_register("rollback", ZFS_IOC_ROLLBACK,
zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
zfs_keys_rollback, ARRAY_SIZE(zfs_keys_rollback));
zfs_ioctl_register("bookmark", ZFS_IOC_BOOKMARK,
zfs_ioc_bookmark, zfs_secpolicy_bookmark, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_bookmark, ARRAY_SIZE(zfs_keys_bookmark));
zfs_ioctl_register("get_bookmarks", ZFS_IOC_GET_BOOKMARKS,
zfs_ioc_get_bookmarks, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
zfs_keys_get_bookmarks, ARRAY_SIZE(zfs_keys_get_bookmarks));
zfs_ioctl_register("get_bookmark_props", ZFS_IOC_GET_BOOKMARK_PROPS,
zfs_ioc_get_bookmark_props, zfs_secpolicy_read, ENTITY_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_get_bookmark_props,
ARRAY_SIZE(zfs_keys_get_bookmark_props));
zfs_ioctl_register("destroy_bookmarks", ZFS_IOC_DESTROY_BOOKMARKS,
zfs_ioc_destroy_bookmarks, zfs_secpolicy_destroy_bookmarks,
POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_destroy_bookmarks,
ARRAY_SIZE(zfs_keys_destroy_bookmarks));
zfs_ioctl_register("receive", ZFS_IOC_RECV_NEW,
zfs_ioc_recv_new, zfs_secpolicy_recv, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_recv_new, ARRAY_SIZE(zfs_keys_recv_new));
zfs_ioctl_register("load-key", ZFS_IOC_LOAD_KEY,
zfs_ioc_load_key, zfs_secpolicy_load_key,
DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE,
zfs_keys_load_key, ARRAY_SIZE(zfs_keys_load_key));
zfs_ioctl_register("unload-key", ZFS_IOC_UNLOAD_KEY,
zfs_ioc_unload_key, zfs_secpolicy_load_key,
DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE,
zfs_keys_unload_key, ARRAY_SIZE(zfs_keys_unload_key));
zfs_ioctl_register("change-key", ZFS_IOC_CHANGE_KEY,
zfs_ioc_change_key, zfs_secpolicy_change_key,
DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY,
B_TRUE, B_TRUE, zfs_keys_change_key,
ARRAY_SIZE(zfs_keys_change_key));
zfs_ioctl_register("sync", ZFS_IOC_POOL_SYNC,
zfs_ioc_pool_sync, zfs_secpolicy_none, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
zfs_keys_pool_sync, ARRAY_SIZE(zfs_keys_pool_sync));
zfs_ioctl_register("reopen", ZFS_IOC_POOL_REOPEN, zfs_ioc_pool_reopen,
zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED, B_TRUE,
B_TRUE, zfs_keys_pool_reopen, ARRAY_SIZE(zfs_keys_pool_reopen));
zfs_ioctl_register("channel_program", ZFS_IOC_CHANNEL_PROGRAM,
zfs_ioc_channel_program, zfs_secpolicy_config,
POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE,
B_TRUE, zfs_keys_channel_program,
ARRAY_SIZE(zfs_keys_channel_program));
zfs_ioctl_register("redact", ZFS_IOC_REDACT,
zfs_ioc_redact, zfs_secpolicy_config, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_redact, ARRAY_SIZE(zfs_keys_redact));
zfs_ioctl_register("zpool_checkpoint", ZFS_IOC_POOL_CHECKPOINT,
zfs_ioc_pool_checkpoint, zfs_secpolicy_config, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_checkpoint, ARRAY_SIZE(zfs_keys_pool_checkpoint));
zfs_ioctl_register("zpool_discard_checkpoint",
ZFS_IOC_POOL_DISCARD_CHECKPOINT, zfs_ioc_pool_discard_checkpoint,
zfs_secpolicy_config, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_discard_checkpoint,
ARRAY_SIZE(zfs_keys_pool_discard_checkpoint));
zfs_ioctl_register("initialize", ZFS_IOC_POOL_INITIALIZE,
zfs_ioc_pool_initialize, zfs_secpolicy_config, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_initialize, ARRAY_SIZE(zfs_keys_pool_initialize));
zfs_ioctl_register("trim", ZFS_IOC_POOL_TRIM,
zfs_ioc_pool_trim, zfs_secpolicy_config, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_trim, ARRAY_SIZE(zfs_keys_pool_trim));
zfs_ioctl_register("wait", ZFS_IOC_WAIT,
zfs_ioc_wait, zfs_secpolicy_none, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
zfs_keys_pool_wait, ARRAY_SIZE(zfs_keys_pool_wait));
zfs_ioctl_register("wait_fs", ZFS_IOC_WAIT_FS,
zfs_ioc_wait_fs, zfs_secpolicy_none, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
zfs_keys_fs_wait, ARRAY_SIZE(zfs_keys_fs_wait));
zfs_ioctl_register("set_bootenv", ZFS_IOC_SET_BOOTENV,
zfs_ioc_set_bootenv, zfs_secpolicy_config, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
zfs_keys_set_bootenv, ARRAY_SIZE(zfs_keys_set_bootenv));
zfs_ioctl_register("get_bootenv", ZFS_IOC_GET_BOOTENV,
zfs_ioc_get_bootenv, zfs_secpolicy_none, POOL_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_TRUE,
zfs_keys_get_bootenv, ARRAY_SIZE(zfs_keys_get_bootenv));
zfs_ioctl_register("zpool_vdev_get_props", ZFS_IOC_VDEV_GET_PROPS,
zfs_ioc_vdev_get_props, zfs_secpolicy_read, POOL_NAME,
POOL_CHECK_NONE, B_FALSE, B_FALSE, zfs_keys_vdev_get_props,
ARRAY_SIZE(zfs_keys_vdev_get_props));
zfs_ioctl_register("zpool_vdev_set_props", ZFS_IOC_VDEV_SET_PROPS,
zfs_ioc_vdev_set_props, zfs_secpolicy_config, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
zfs_keys_vdev_set_props, ARRAY_SIZE(zfs_keys_vdev_set_props));
/* IOCTLS that use the legacy function signature */
zfs_ioctl_register_legacy(ZFS_IOC_POOL_FREEZE, zfs_ioc_pool_freeze,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_READONLY);
zfs_ioctl_register_pool(ZFS_IOC_POOL_CREATE, zfs_ioc_pool_create,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SCAN,
zfs_ioc_pool_scan);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_UPGRADE,
zfs_ioc_pool_upgrade);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ADD,
zfs_ioc_vdev_add);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_REMOVE,
zfs_ioc_vdev_remove);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SET_STATE,
zfs_ioc_vdev_set_state);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ATTACH,
zfs_ioc_vdev_attach);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_DETACH,
zfs_ioc_vdev_detach);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETPATH,
zfs_ioc_vdev_setpath);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETFRU,
zfs_ioc_vdev_setfru);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SET_PROPS,
zfs_ioc_pool_set_props);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SPLIT,
zfs_ioc_vdev_split);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_REGUID,
zfs_ioc_pool_reguid);
zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_CONFIGS,
zfs_ioc_pool_configs, zfs_secpolicy_none);
zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_TRYIMPORT,
zfs_ioc_pool_tryimport, zfs_secpolicy_config);
zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_FAULT,
zfs_ioc_inject_fault, zfs_secpolicy_inject);
zfs_ioctl_register_pool_meta(ZFS_IOC_CLEAR_FAULT,
zfs_ioc_clear_fault, zfs_secpolicy_inject);
zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_LIST_NEXT,
zfs_ioc_inject_list_next, zfs_secpolicy_inject);
/*
* pool destroy and export don't log the history as part of
* zfsdev_ioctl; rather, zfs_ioc_pool_export
* does the logging for those commands.
*/
zfs_ioctl_register_pool(ZFS_IOC_POOL_DESTROY, zfs_ioc_pool_destroy,
zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_EXPORT, zfs_ioc_pool_export,
zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_STATS, zfs_ioc_pool_stats,
zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_PROPS, zfs_ioc_pool_get_props,
zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_pool(ZFS_IOC_ERROR_LOG, zfs_ioc_error_log,
zfs_secpolicy_inject, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_DSOBJ_TO_DSNAME,
zfs_ioc_dsobj_to_dsname,
zfs_secpolicy_diff, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_HISTORY,
zfs_ioc_pool_get_history,
zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_IMPORT, zfs_ioc_pool_import,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE);
zfs_ioctl_register_pool(ZFS_IOC_CLEAR, zfs_ioc_clear,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_READONLY);
zfs_ioctl_register_dataset_read(ZFS_IOC_SPACE_WRITTEN,
zfs_ioc_space_written);
zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_RECVD_PROPS,
zfs_ioc_objset_recvd_props);
zfs_ioctl_register_dataset_read(ZFS_IOC_NEXT_OBJ,
zfs_ioc_next_obj);
zfs_ioctl_register_dataset_read(ZFS_IOC_GET_FSACL,
zfs_ioc_get_fsacl);
zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_STATS,
zfs_ioc_objset_stats);
zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_ZPLPROPS,
zfs_ioc_objset_zplprops);
zfs_ioctl_register_dataset_read(ZFS_IOC_DATASET_LIST_NEXT,
zfs_ioc_dataset_list_next);
zfs_ioctl_register_dataset_read(ZFS_IOC_SNAPSHOT_LIST_NEXT,
zfs_ioc_snapshot_list_next);
zfs_ioctl_register_dataset_read(ZFS_IOC_SEND_PROGRESS,
zfs_ioc_send_progress);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_DIFF,
zfs_ioc_diff, zfs_secpolicy_diff);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_STATS,
zfs_ioc_obj_to_stats, zfs_secpolicy_diff);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_PATH,
zfs_ioc_obj_to_path, zfs_secpolicy_diff);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_ONE,
zfs_ioc_userspace_one, zfs_secpolicy_userspace_one);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_MANY,
zfs_ioc_userspace_many, zfs_secpolicy_userspace_many);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_SEND,
zfs_ioc_send, zfs_secpolicy_send);
zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_PROP, zfs_ioc_set_prop,
zfs_secpolicy_none);
zfs_ioctl_register_dataset_modify(ZFS_IOC_DESTROY, zfs_ioc_destroy,
zfs_secpolicy_destroy);
zfs_ioctl_register_dataset_modify(ZFS_IOC_RENAME, zfs_ioc_rename,
zfs_secpolicy_rename);
zfs_ioctl_register_dataset_modify(ZFS_IOC_RECV, zfs_ioc_recv,
zfs_secpolicy_recv);
zfs_ioctl_register_dataset_modify(ZFS_IOC_PROMOTE, zfs_ioc_promote,
zfs_secpolicy_promote);
zfs_ioctl_register_dataset_modify(ZFS_IOC_INHERIT_PROP,
zfs_ioc_inherit_prop, zfs_secpolicy_inherit_prop);
zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_FSACL, zfs_ioc_set_fsacl,
zfs_secpolicy_set_fsacl);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_SHARE, zfs_ioc_share,
zfs_secpolicy_share, POOL_CHECK_NONE);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_SMB_ACL, zfs_ioc_smb_acl,
zfs_secpolicy_smb_acl, POOL_CHECK_NONE);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_USERSPACE_UPGRADE,
zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_TMP_SNAPSHOT,
zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_NEXT, zfs_ioc_events_next,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_CLEAR, zfs_ioc_events_clear,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_SEEK, zfs_ioc_events_seek,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_init_os();
}
/*
* Verify that for non-legacy ioctls the input nvlist
* pairs match against the expected input.
*
* Possible errors are:
* ZFS_ERR_IOC_ARG_UNAVAIL An unrecognized nvpair was encountered
* ZFS_ERR_IOC_ARG_REQUIRED A required nvpair is missing
* ZFS_ERR_IOC_ARG_BADTYPE Invalid type for nvpair
*/
static int
zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec)
{
const zfs_ioc_key_t *nvl_keys = vec->zvec_nvl_keys;
boolean_t required_keys_found = B_FALSE;
/*
* examine each input pair
*/
for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
char *name = nvpair_name(pair);
data_type_t type = nvpair_type(pair);
boolean_t identified = B_FALSE;
/*
* check pair against the documented names and type
*/
for (int k = 0; k < vec->zvec_nvl_key_count; k++) {
/* if not a wild card name, check for an exact match */
if ((nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) == 0 &&
strcmp(nvl_keys[k].zkey_name, name) != 0)
continue;
identified = B_TRUE;
if (nvl_keys[k].zkey_type != DATA_TYPE_ANY &&
nvl_keys[k].zkey_type != type) {
return (SET_ERROR(ZFS_ERR_IOC_ARG_BADTYPE));
}
if (nvl_keys[k].zkey_flags & ZK_OPTIONAL)
continue;
required_keys_found = B_TRUE;
break;
}
/* allow an 'optional' key, everything else is invalid */
if (!identified &&
(strcmp(name, "optional") != 0 ||
type != DATA_TYPE_NVLIST)) {
return (SET_ERROR(ZFS_ERR_IOC_ARG_UNAVAIL));
}
}
/* verify that all required keys were found */
for (int k = 0; k < vec->zvec_nvl_key_count; k++) {
if (nvl_keys[k].zkey_flags & ZK_OPTIONAL)
continue;
if (nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) {
/* at least one non-optional key is expected here */
if (!required_keys_found)
return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED));
continue;
}
if (!nvlist_exists(innvl, nvl_keys[k].zkey_name))
return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED));
}
return (0);
}
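Illustrative aside (not part of this diff): a hypothetical key table exercising each rule enforced above -- an exact-match required key, an optional key, and a wildcard entry that matches any name but requires at least one pair to be present. None of these names exist in the real registry.
static const zfs_ioc_key_t zfs_keys_example[] = {
	{"target",	DATA_TYPE_STRING,	0},		/* required, exact name */
	{"props",	DATA_TYPE_NVLIST,	ZK_OPTIONAL},	/* may be omitted */
	{"<names>",	DATA_TYPE_BOOLEAN,	ZK_WILDCARDLIST}, /* one or more, any name */
};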
static int
pool_status_check(const char *name, zfs_ioc_namecheck_t type,
zfs_ioc_poolcheck_t check)
{
spa_t *spa;
int error;
ASSERT(type == POOL_NAME || type == DATASET_NAME ||
type == ENTITY_NAME);
if (check & POOL_CHECK_NONE)
return (0);
error = spa_open(name, &spa, FTAG);
if (error == 0) {
if ((check & POOL_CHECK_SUSPENDED) && spa_suspended(spa))
error = SET_ERROR(EAGAIN);
else if ((check & POOL_CHECK_READONLY) && !spa_writeable(spa))
error = SET_ERROR(EROFS);
spa_close(spa, FTAG);
}
return (error);
}
int
zfsdev_getminor(zfs_file_t *fp, minor_t *minorp)
{
zfsdev_state_t *zs, *fpd;
ASSERT(!MUTEX_HELD(&zfsdev_state_lock));
fpd = zfs_file_private(fp);
if (fpd == NULL)
return (SET_ERROR(EBADF));
mutex_enter(&zfsdev_state_lock);
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
continue;
if (fpd == zs) {
*minorp = fpd->zs_minor;
mutex_exit(&zfsdev_state_lock);
return (0);
}
}
mutex_exit(&zfsdev_state_lock);
return (SET_ERROR(EBADF));
}
void *
zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
{
zfsdev_state_t *zs;
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == minor) {
- smp_rmb();
+ membar_consumer();
switch (which) {
case ZST_ONEXIT:
return (zs->zs_onexit);
case ZST_ZEVENT:
return (zs->zs_zevent);
case ZST_ALL:
return (zs);
}
}
}
return (NULL);
}
/*
* Find a free minor number. The zfsdev_state_list is expected to
* be short since it is only a list of currently open file handles.
*/
static minor_t
zfsdev_minor_alloc(void)
{
static minor_t last_minor = 0;
minor_t m;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
for (m = last_minor + 1; m != last_minor; m++) {
if (m > ZFSDEV_MAX_MINOR)
m = 1;
if (zfsdev_get_state(m, ZST_ALL) == NULL) {
last_minor = m;
return (m);
}
}
return (0);
}
int
zfsdev_state_init(void *priv)
{
zfsdev_state_t *zs, *zsprev = NULL;
minor_t minor;
boolean_t newzs = B_FALSE;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
minor = zfsdev_minor_alloc();
if (minor == 0)
return (SET_ERROR(ENXIO));
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
break;
zsprev = zs;
}
if (!zs) {
zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
newzs = B_TRUE;
}
zfsdev_private_set_state(priv, zs);
zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
/*
* In order to provide for lock-free concurrent read access
* to the minor list in zfsdev_get_state(), new entries
* must be completely written before linking them into the
* list whereas existing entries are already linked; the last
* operation must be updating zs_minor (from -1 to the new
* value).
*/
if (newzs) {
zs->zs_minor = minor;
membar_producer();
zsprev->zs_next = zs;
} else {
membar_producer();
zs->zs_minor = minor;
}
return (0);
}
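Illustrative aside (not part of this diff): the ordering contract described in the comment above is the classic publish/consume pairing -- fully initialize the entry, issue membar_producer(), then perform the store that makes it visible; zfsdev_get_state() issues membar_consumer() after seeing a matching zs_minor. A rough userspace analogue of the new-entry case using C11 fences (a strict C11 version would also make the linking and minor fields atomic):
#include <stdatomic.h>
#include <stddef.h>
struct entry {
	int minor;		/* -1 means "not yet valid" */
	void *payload;
	struct entry *next;
};
static void
publish_entry(struct entry *tail, struct entry *e, int minor, void *payload)
{
	e->payload = payload;
	e->next = NULL;
	e->minor = minor;
	atomic_thread_fence(memory_order_release);	/* ~ membar_producer() */
	tail->next = e;					/* now visible to readers */
}
static void *
lookup_entry(struct entry *head, int minor)
{
	for (struct entry *e = head; e != NULL; e = e->next) {
		if (e->minor == minor) {
			atomic_thread_fence(memory_order_acquire); /* ~ membar_consumer() */
			return (e->payload);
		}
	}
	return (NULL);
}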
void
zfsdev_state_destroy(void *priv)
{
zfsdev_state_t *zs = zfsdev_private_get_state(priv);
ASSERT(zs != NULL);
ASSERT3S(zs->zs_minor, >, 0);
/*
* The last reference to this zfsdev file descriptor is being dropped.
* We don't have to worry about lookup grabbing this state object, and
* zfsdev_state_init() will not try to reuse this object until it is
* invalidated by setting zs_minor to -1. Invalidation must be done
* last, with a memory barrier to ensure ordering. This lets us avoid
* taking the global zfsdev state lock around destruction.
*/
zfs_onexit_destroy(zs->zs_onexit);
zfs_zevent_destroy(zs->zs_zevent);
zs->zs_onexit = NULL;
zs->zs_zevent = NULL;
membar_producer();
zs->zs_minor = -1;
}
long
zfsdev_ioctl_common(uint_t vecnum, zfs_cmd_t *zc, int flag)
{
int error, cmd;
const zfs_ioc_vec_t *vec;
char *saved_poolname = NULL;
uint64_t max_nvlist_src_size;
size_t saved_poolname_len = 0;
nvlist_t *innvl = NULL;
fstrans_cookie_t cookie;
hrtime_t start_time = gethrtime();
cmd = vecnum;
error = 0;
if (vecnum >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))
return (SET_ERROR(ZFS_ERR_IOC_CMD_UNAVAIL));
vec = &zfs_ioc_vec[vecnum];
/*
* The registered ioctl list may be sparse; verify that either
* a normal or a legacy handler is registered.
*/
if (vec->zvec_func == NULL && vec->zvec_legacy_func == NULL)
return (SET_ERROR(ZFS_ERR_IOC_CMD_UNAVAIL));
zc->zc_iflags = flag & FKIOCTL;
max_nvlist_src_size = zfs_max_nvlist_src_size_os();
if (zc->zc_nvlist_src_size > max_nvlist_src_size) {
/*
* Make sure the user doesn't pass in an insane value for
* zc_nvlist_src_size. We have to check, since we will end
* up allocating that much memory inside of get_nvlist(). This
* prevents a nefarious user from allocating tons of kernel
* memory.
*
* Also, we return EINVAL instead of ENOMEM here. The reason is
* that returning ENOMEM from an ioctl() has a special
* connotation: it means the user's size value is too small and
* needs to be expanded to hold the nvlist. See
* zcmd_expand_dst_nvlist() for details.
*/
error = SET_ERROR(EINVAL); /* User's size too big */
} else if (zc->zc_nvlist_src_size != 0) {
error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &innvl);
if (error != 0)
goto out;
}
/*
* Ensure that all pool/dataset names are valid before we pass down to
* the lower layers.
*/
zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
switch (vec->zvec_namecheck) {
case POOL_NAME:
if (pool_namecheck(zc->zc_name, NULL, NULL) != 0)
error = SET_ERROR(EINVAL);
else
error = pool_status_check(zc->zc_name,
vec->zvec_namecheck, vec->zvec_pool_check);
break;
case DATASET_NAME:
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0)
error = SET_ERROR(EINVAL);
else
error = pool_status_check(zc->zc_name,
vec->zvec_namecheck, vec->zvec_pool_check);
break;
case ENTITY_NAME:
if (entity_namecheck(zc->zc_name, NULL, NULL) != 0) {
error = SET_ERROR(EINVAL);
} else {
error = pool_status_check(zc->zc_name,
vec->zvec_namecheck, vec->zvec_pool_check);
}
break;
case NO_NAME:
break;
}
/*
* Ensure that all input pairs are valid before we pass them down
* to the lower layers.
*
* The vectored functions can use fnvlist_lookup_{type} for any
* required pairs since zfs_check_input_nvpairs() confirmed that
* they exist and are of the correct type.
*/
if (error == 0 && vec->zvec_func != NULL) {
error = zfs_check_input_nvpairs(innvl, vec);
if (error != 0)
goto out;
}
if (error == 0) {
cookie = spl_fstrans_mark();
error = vec->zvec_secpolicy(zc, innvl, CRED());
spl_fstrans_unmark(cookie);
}
if (error != 0)
goto out;
/* legacy ioctls can modify zc_name */
/*
* Can't use kmem_strdup() as we might truncate the string and
* kmem_strfree() would then free with incorrect size.
*/
saved_poolname_len = strlen(zc->zc_name) + 1;
saved_poolname = kmem_alloc(saved_poolname_len, KM_SLEEP);
strlcpy(saved_poolname, zc->zc_name, saved_poolname_len);
saved_poolname[strcspn(saved_poolname, "/@#")] = '\0';
if (vec->zvec_func != NULL) {
nvlist_t *outnvl;
int puterror = 0;
spa_t *spa;
nvlist_t *lognv = NULL;
ASSERT(vec->zvec_legacy_func == NULL);
/*
* Add the innvl to the lognv before calling the func,
* in case the func changes the innvl.
*/
if (vec->zvec_allow_log) {
lognv = fnvlist_alloc();
fnvlist_add_string(lognv, ZPOOL_HIST_IOCTL,
vec->zvec_name);
if (!nvlist_empty(innvl)) {
fnvlist_add_nvlist(lognv, ZPOOL_HIST_INPUT_NVL,
innvl);
}
}
outnvl = fnvlist_alloc();
cookie = spl_fstrans_mark();
error = vec->zvec_func(zc->zc_name, innvl, outnvl);
spl_fstrans_unmark(cookie);
/*
* Some commands can partially execute, modify state, and still
* return an error. In these cases, attempt to record what
* was modified.
*/
if ((error == 0 ||
(cmd == ZFS_IOC_CHANNEL_PROGRAM && error != EINVAL)) &&
vec->zvec_allow_log &&
spa_open(zc->zc_name, &spa, FTAG) == 0) {
if (!nvlist_empty(outnvl)) {
size_t out_size = fnvlist_size(outnvl);
if (out_size > zfs_history_output_max) {
fnvlist_add_int64(lognv,
ZPOOL_HIST_OUTPUT_SIZE, out_size);
} else {
fnvlist_add_nvlist(lognv,
ZPOOL_HIST_OUTPUT_NVL, outnvl);
}
}
if (error != 0) {
fnvlist_add_int64(lognv, ZPOOL_HIST_ERRNO,
error);
}
fnvlist_add_int64(lognv, ZPOOL_HIST_ELAPSED_NS,
gethrtime() - start_time);
(void) spa_history_log_nvl(spa, lognv);
spa_close(spa, FTAG);
}
fnvlist_free(lognv);
if (!nvlist_empty(outnvl) || zc->zc_nvlist_dst_size != 0) {
int smusherror = 0;
if (vec->zvec_smush_outnvlist) {
smusherror = nvlist_smush(outnvl,
zc->zc_nvlist_dst_size);
}
if (smusherror == 0)
puterror = put_nvlist(zc, outnvl);
}
if (puterror != 0)
error = puterror;
nvlist_free(outnvl);
} else {
cookie = spl_fstrans_mark();
error = vec->zvec_legacy_func(zc);
spl_fstrans_unmark(cookie);
}
out:
nvlist_free(innvl);
if (error == 0 && vec->zvec_allow_log) {
char *s = tsd_get(zfs_allow_log_key);
if (s != NULL)
kmem_strfree(s);
(void) tsd_set(zfs_allow_log_key, kmem_strdup(saved_poolname));
}
if (saved_poolname != NULL)
kmem_free(saved_poolname, saved_poolname_len);
return (error);
}
int
zfs_kmod_init(void)
{
int error;
if ((error = zvol_init()) != 0)
return (error);
spa_init(SPA_MODE_READ | SPA_MODE_WRITE);
zfs_init();
zfs_ioctl_init();
mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
zfsdev_state_list = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
zfsdev_state_list->zs_minor = -1;
if ((error = zfsdev_attach()) != 0)
goto out;
tsd_create(&zfs_fsyncer_key, NULL);
tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
tsd_create(&zfs_allow_log_key, zfs_allow_log_destroy);
return (0);
out:
zfs_fini();
spa_fini();
zvol_fini();
return (error);
}
void
zfs_kmod_fini(void)
{
zfsdev_state_t *zs, *zsnext = NULL;
zfsdev_detach();
mutex_destroy(&zfsdev_state_lock);
for (zs = zfsdev_state_list; zs != NULL; zs = zsnext) {
zsnext = zs->zs_next;
if (zs->zs_onexit)
zfs_onexit_destroy(zs->zs_onexit);
if (zs->zs_zevent)
zfs_zevent_destroy(zs->zs_zevent);
kmem_free(zs, sizeof (zfsdev_state_t));
}
zfs_ereport_taskq_fini(); /* run before zfs_fini() on Linux */
zfs_fini();
spa_fini();
zvol_fini();
tsd_destroy(&zfs_fsyncer_key);
tsd_destroy(&rrw_tsd_key);
tsd_destroy(&zfs_allow_log_key);
}
ZFS_MODULE_PARAM(zfs, zfs_, max_nvlist_src_size, ULONG, ZMOD_RW,
"Maximum size in bytes allowed for src nvlist passed with ZFS ioctls");
ZFS_MODULE_PARAM(zfs, zfs_, history_output_max, ULONG, ZMOD_RW,
"Maximum size in bytes of ZFS ioctl output that will be logged");
diff --git a/sys/contrib/openzfs/module/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
index b02e8283c77d..57f03f116273 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
@@ -1,993 +1,995 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
static ulong_t zfs_fsync_sync_cnt = 4;
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
+ int error = 0;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ goto out;
atomic_inc_32(&zp->z_sync_writes_cnt);
zil_commit(zfsvfs->z_log, zp->z_id);
atomic_dec_32(&zp->z_sync_writes_cnt);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
}
+out:
tsd_set(zfs_fsyncer_key, NULL);
- return (0);
+ return (error);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
* Lseek support for finding holes (cmd == SEEK_HOLE) and
* data (cmd == SEEK_DATA). "off" is an in/out parameter.
*/
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfs_locked_range_t *lr;
uint64_t noff = (uint64_t)*off; /* new offset */
uint64_t file_sz;
int error;
boolean_t hole;
file_sz = zp->z_size;
if (noff >= file_sz) {
return (SET_ERROR(ENXIO));
}
if (cmd == F_SEEK_HOLE)
hole = B_TRUE;
else
hole = B_FALSE;
/* Flush any mmap()'d data to disk */
if (zn_has_cached_data(zp))
zn_flush_cached_data(zp, B_FALSE);
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, file_sz, RL_READER);
error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
zfs_rangelock_exit(lr);
if (error == ESRCH)
return (SET_ERROR(ENXIO));
/* File was dirty, so fall back to using generic logic */
if (error == EBUSY) {
if (hole)
*off = file_sz;
return (0);
}
/*
* We could find a hole that begins after the logical end-of-file,
* because dmu_offset_next() only works on whole blocks. If the
* EOF falls mid-block, then indicate that the "virtual hole"
* at the end of the file begins at the logical EOF, rather than
* at the end of the last block.
*/
if (noff > file_sz) {
ASSERT(hole);
noff = file_sz;
}
if (noff < *off)
return (error);
*off = noff;
return (error);
}
int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
error = zfs_holey_common(zp, cmd, off);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
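Illustrative aside (not part of this diff): from userspace this support surfaces through lseek(2) with SEEK_DATA/SEEK_HOLE. A small sketch that walks a file's data regions (on Linux, _GNU_SOURCE may be needed for the SEEK_* definitions):
#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
int
main(int argc, char **argv)
{
	if (argc != 2)
		return (1);
	int fd = open(argv[1], O_RDONLY);
	if (fd == -1)
		return (1);
	off_t end = lseek(fd, 0, SEEK_END);
	off_t off = 0;
	while (off < end) {
		off_t data = lseek(fd, off, SEEK_DATA);
		if (data == -1)
			break;			/* ENXIO: no data past off */
		off_t hole = lseek(fd, data, SEEK_HOLE);
		(void) printf("data %jd..%jd\n", (intmax_t)data, (intmax_t)hole);
		off = hole;
	}
	(void) close(fd);
	return (0);
}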
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if (flag & V_ACE_MASK)
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
else
error = zfs_zaccess_rwx(zp, mode, flag, cr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
/*
* Read bytes from specified file into supplied buffer.
*
* IN: zp - inode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - O_SYNC flags; used to provide FRSYNC semantics.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range, buffer filled.
*
* RETURN: 0 on success, error code on failure.
*
* Side Effects:
* inode - atime updated if byte count > 0
*/
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
(void) cr;
int error = 0;
boolean_t frsync = B_FALSE;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
/* We don't copy out anything useful for directories. */
if (Z_ISDIR(ZTOTYPE(zp))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
/*
* Validate file offset
*/
if (zfs_uio_offset(uio) < (offset_t)0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Fasttrack empty reads
*/
if (zfs_uio_resid(uio) == 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
#ifdef FRSYNC
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
* Only do this for non-snapshots.
*
* Some platforms do not support FRSYNC and instead map it
* to O_SYNC, which results in unnecessary calls to zil_commit. We
* only honor FRSYNC requests on platforms which support it.
*/
frsync = !!(ioflag & FRSYNC);
#endif
if (zfsvfs->z_log &&
(frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zfsvfs->z_log, zp->z_id);
/*
* Lock the range against changes.
*/
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
/*
* If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime.
*/
if (zfs_uio_offset(uio) >= zp->z_size) {
error = 0;
goto out;
}
ASSERT(zfs_uio_offset(uio) < zp->z_size);
#if defined(__linux__)
ssize_t start_offset = zfs_uio_offset(uio);
#endif
ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
ssize_t start_resid = n;
while (n > 0) {
ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
if (zfs_uio_segflg(uio) == UIO_NOCOPY)
error = mappedread_sf(zp, nbytes, uio);
else
#endif
if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
error = mappedread(zp, nbytes, uio);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes);
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
#if defined(__linux__)
/*
* if we actually read some bytes, bubbling EFAULT
* up to become EAGAIN isn't what we want here...
*
* ...on Linux, at least. On FBSD, doing this breaks.
*/
if (error == EFAULT &&
(zfs_uio_offset(uio) - start_offset) != 0)
error = 0;
#endif
break;
}
n -= nbytes;
}
int64_t nread = start_resid - n;
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
task_io_account_read(nread);
out:
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
ASSERT(clear_setid_bits_txgp != NULL);
ASSERT(tx != NULL);
/*
* Clear Set-UID/Set-GID bits on successful write if not
* privileged and at least one of the execute bits is set.
*
* It would be nice to do this after all writes have
* been done, but that would still expose the ISUID/ISGID
* to another app after the partial write is committed.
*
* Note: we don't call zfs_fuid_map_id() here because
* user 0 is not an ephemeral uid.
*/
mutex_enter(&zp->z_acl_lock);
if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
(zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
secpolicy_vnode_setid_retain(zp, cr,
((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
uint64_t newmode;
zp->z_mode &= ~(S_ISUID | S_ISGID);
newmode = zp->z_mode;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
(void *)&newmode, sizeof (uint64_t), tx);
mutex_exit(&zp->z_acl_lock);
/*
* Make sure SUID/SGID bits will be removed when we replay the
* log. If the setid bits keep coming back, don't log more
* than one TX_SETATTR per transaction group.
*/
if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
vattr_t va = {0};
va.va_mask = ATTR_MODE;
va.va_nodeid = zp->z_id;
va.va_mode = newmode;
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
ATTR_MODE, NULL);
*clear_setid_bits_txgp = dmu_tx_get_txg(tx);
}
} else {
mutex_exit(&zp->z_acl_lock);
}
}
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - O_APPEND flag set if in append mode.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated if byte count > 0
*/
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
int error = 0, error1;
ssize_t start_resid = zfs_uio_resid(uio);
uint64_t clear_setid_bits_txg = 0;
/*
* Fasttrack empty write
*/
ssize_t n = start_resid;
if (n == 0)
return (0);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
sa_bulk_attr_t bulk[4];
int count = 0;
uint64_t mtime[2], ctime[2];
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
(zfs_uio_offset(uio) < zp->z_size))) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* Validate file offset
*/
offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
if (woff < 0) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
const uint64_t max_blksz = zfsvfs->z_max_blksz;
/*
* Pre-fault the pages to ensure slow (eg NFS) pages
* don't hold up txg.
* Skip this if uio contains loaned arc_buf.
*/
if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFAULT));
}
/*
* If in append mode, set the io offset pointer to eof.
*/
zfs_locked_range_t *lr;
if (ioflag & O_APPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
woff = lr->lr_offset;
if (lr->lr_length == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
* Note that zp_size cannot change with this lock held.
*/
woff = zp->z_size;
}
zfs_uio_setoffset(uio, woff);
} else {
/*
* Note that if the file block size will change as a result of
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
}
if (zn_rlimit_fsize(zp, uio)) {
zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFBIG));
}
const rlim64_t limit = MAXOFFSET_T;
if (woff >= limit) {
zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFBIG));
}
if (n > limit - woff)
n = limit - woff;
uint64_t end_size = MAX(zp->z_size, woff + n);
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
const uint64_t projid = zp->z_projid;
/*
* Write the file in reasonable size chunks. Each chunk is written
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
woff = zfs_uio_offset(uio);
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
arc_buf_t *abuf = NULL;
if (n >= max_blksz && woff >= zp->z_size &&
P2PHASE(woff, max_blksz) == 0 &&
zp->z_blksz == max_blksz) {
/*
* This write covers a full block. "Borrow" a buffer
* from the dmu so that we can fill it before we enter
* a transaction. This avoids the possibility of
* holding up the transaction if the data copy hangs
* up on a pagefault (e.g., from an NFS server mapping).
*/
size_t cbytes;
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
max_blksz);
ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == max_blksz);
if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
UIO_WRITE, uio, &cbytes))) {
dmu_return_arcbuf(abuf);
break;
}
ASSERT3S(cbytes, ==, max_blksz);
}
/*
* Start a transaction.
*/
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
MIN(n, max_blksz));
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
break;
}
/*
* NB: We must call zfs_clear_setid_bits_if_necessary before
* committing the transaction!
*/
/*
* If rangelock_enter() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
* on the first iteration since rangelock_reduce() will
* shrink down lr_length to the appropriate size.
*/
if (lr->lr_length == UINT64_MAX) {
uint64_t new_blksz;
if (zp->z_blksz > max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
new_blksz = MIN(end_size,
1 << highbit64(zp->z_blksz));
} else {
new_blksz = MIN(end_size, max_blksz);
}
zfs_grow_blocksize(zp, new_blksz, tx);
zfs_rangelock_reduce(lr, woff, n);
}
/*
* XXX - should we really limit each write to z_max_blksz?
* Perhaps we should use SPA_MAXBLOCKSIZE chunks?
*/
const ssize_t nbytes =
MIN(n, max_blksz - P2PHASE(woff, max_blksz));
ssize_t tx_bytes;
if (abuf == NULL) {
tx_bytes = zfs_uio_resid(uio);
zfs_uio_fault_disable(uio, B_TRUE);
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
if (error == EFAULT) {
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
/*
* Account for partial writes before
* continuing the loop.
* Update needs to occur before the next
* zfs_uio_prefaultpages, or prefaultpages may
* error, and we may break the loop early.
*/
if (tx_bytes != zfs_uio_resid(uio))
n -= tx_bytes - zfs_uio_resid(uio);
if (zfs_uio_prefaultpages(MIN(n, max_blksz),
uio)) {
break;
}
continue;
}
#endif
/*
* On FreeBSD, EFAULT should be propagated back to the
* VFS, which will handle faulting and will retry.
*/
if (error != 0 && error != EFAULT) {
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
break;
}
tx_bytes -= zfs_uio_resid(uio);
} else {
/* Implied by abuf != NULL: */
ASSERT3S(n, >=, max_blksz);
ASSERT0(P2PHASE(woff, max_blksz));
/*
* We can simplify nbytes to MIN(n, max_blksz) since
* P2PHASE(woff, max_blksz) is 0, and knowing
* n >= max_blksz lets us simplify further:
*/
ASSERT3S(nbytes, ==, max_blksz);
/*
* Thus, we're writing a full block at a block-aligned
* offset and extending the file past EOF.
*
* dmu_assign_arcbuf_by_dbuf() will directly assign the
* arc buffer to a dbuf.
*/
error = dmu_assign_arcbuf_by_dbuf(
sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
if (error != 0) {
/*
* XXX This might not be necessary if
* dmu_assign_arcbuf_by_dbuf is guaranteed
* to be atomic.
*/
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_return_arcbuf(abuf);
dmu_tx_commit(tx);
break;
}
ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
zfs_uioskip(uio, nbytes);
tx_bytes = nbytes;
}
if (tx_bytes && zn_has_cached_data(zp) &&
!(ioflag & O_DIRECT)) {
update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
}
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
break;
}
zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
&clear_setid_bits_txg, tx);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
(void) atomic_cas_64(&zp->z_size, end_size,
zfs_uio_offset(uio));
ASSERT(error == 0 || error == EFAULT);
}
/*
* If we are replaying and eof is non-zero then force
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
zp->z_size = zfsvfs->z_replay_eof;
error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
if (error1 != 0)
/* Avoid clobbering EFAULT. */
error = error1;
/*
* NB: During replay, the TX_SETATTR record logged by
* zfs_clear_setid_bits_if_necessary must precede any of
* the TX_WRITE records logged here.
*/
zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
NULL, NULL);
dmu_tx_commit(tx);
if (error != 0)
break;
ASSERT3S(tx_bytes, ==, nbytes);
n -= nbytes;
if (n > 0) {
if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
error = SET_ERROR(EFAULT);
break;
}
}
}
zfs_znode_update_vfs(zp);
zfs_rangelock_exit(lr);
/*
* If we're in replay mode, or we made no progress, or the
* uio data is inaccessible, return an error. Otherwise, it's
* at least a partial write, so it's successful.
*/
if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
error == EFAULT) {
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
if (ioflag & (O_SYNC | O_DSYNC) ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
const int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
task_io_account_write(nwritten);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (0);
}
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
zilog_t *zilog = zfsvfs->z_log;
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
+ return (error);
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
- ZFS_EXIT(zfsvfs);
+ zfs_exit(zfsvfs, FTAG);
return (error);
}
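/*
* A minimal sketch, for illustration only (not part of this change), of
* the entry/teardown shape the hunks above convert each operation to.
* zfs_op_example() is a hypothetical function; zfs_enter_verify_zp() and
* zfs_exit() are the helpers this change uses in place of the old
* ZFS_ENTER/ZFS_VERIFY_ZP/ZFS_EXIT macros.
*/
static int
zfs_op_example(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
/* Replaces the old ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); pair. */
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
/* ... operation body, e.g. an ACL lookup as in zfs_getsecattr() ... */
/* Replaces ZFS_EXIT(zfsvfs). */
zfs_exit(zfsvfs, FTAG);
return (error);
}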
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif
static void zfs_get_done(zgd_t *zgd, int error);
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zfsvfs_t *zfsvfs = arg;
objset_t *os = zfsvfs->z_os;
znode_t *zp;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error = 0;
uint64_t zp_gen;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
/*
* Nothing to do if the file has been removed
*/
if (zfs_zget(zfsvfs, object, &zp) != 0)
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
/* check if generation number matches */
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (zp_gen)) != 0) {
zfs_zrele_async(zp);
return (SET_ERROR(EIO));
}
if (zp_gen != gen) {
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zp;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
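/*
* (Here a non-NULL buf means the ZIL handed us space inside the log
* record to fill, i.e. a WR_NEED_COPY record; a NULL buf means a
* WR_INDIRECT record whose data is synced out via dmu_sync() below.)
*/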
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
}
ASSERT(error == 0 || error == ENOENT);
} else { /* indirect write */
/*
* Have to lock the whole block to ensure when it's
* written out and its checksum is being calculated
* that no one can change the data. We need to re-check
* blocksize after we get the lock in case it's changed!
*/
for (;;) {
uint64_t blkoff;
size = zp->z_blksz;
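/*
* (A non-power-of-2 block size implies a single-block file whose only
* block starts at offset 0, so aligning down yields blkoff == offset.)
*/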
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
if (zp->z_blksz == size)
break;
offset += blkoff;
zfs_rangelock_exit(zgd->zgd_lr);
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
if (zil_fault_io) {
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
if (error == 0)
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zfs_get_done, zgd);
ASSERT(error || lr->lr_length <= size);
/*
* On success, we need to wait for the write I/O
* initiated by dmu_sync() to complete before we can
* release this dbuf. We will finish everything up
* in the zfs_get_done() callback.
*/
if (error == 0)
return (0);
if (error == EALREADY) {
lr->lr_common.lrc_txtype = TX_WRITE2;
/*
* TX_WRITE2 relies on the data previously
* written by the TX_WRITE that caused
* EALREADY. We zero out the BP because
* it is the old, currently-on-disk BP.
*/
zgd->zgd_bp = NULL;
BP_ZERO(bp);
error = 0;
}
}
}
zfs_get_done(zgd, error);
return (error);
}
static void
zfs_get_done(zgd_t *zgd, int error)
{
(void) error;
znode_t *zp = zgd->zgd_private;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
kmem_free(zgd, sizeof (zgd_t));
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, ULONG, ZMOD_RW,
"Bytes to read per chunk");
diff --git a/sys/contrib/openzfs/rpm/generic/zfs.spec.in b/sys/contrib/openzfs/rpm/generic/zfs.spec.in
index b1a94fbb7ab2..aea82d241781 100644
--- a/sys/contrib/openzfs/rpm/generic/zfs.spec.in
+++ b/sys/contrib/openzfs/rpm/generic/zfs.spec.in
@@ -1,578 +1,580 @@
%global _sbindir /sbin
%global _libdir /%{_lib}
# Set the default udev directory based on distribution.
%if %{undefined _udevdir}
%if 0%{?fedora}%{?rhel}%{?centos}
%global _udevdir %{_prefix}/lib/udev
%else
%global _udevdir /lib/udev
%endif
%endif
# Set the default udevrule directory based on distribution.
%if %{undefined _udevruledir}
%if 0%{?fedora}%{?rhel}%{?centos}
%global _udevruledir %{_prefix}/lib/udev/rules.d
%else
%global _udevruledir /lib/udev/rules.d
%endif
%endif
# Set the default dracut directory based on distribution.
%if %{undefined _dracutdir}
%if 0%{?fedora}%{?rhel}%{?centos}
%global _dracutdir %{_prefix}/lib/dracut
%else
%global _dracutdir %{_prefix}/share/dracut
%endif
%endif
%if %{undefined _initconfdir}
%global _initconfdir /etc/sysconfig
%endif
%if %{undefined _unitdir}
%global _unitdir %{_prefix}/lib/systemd/system
%endif
%if %{undefined _presetdir}
%global _presetdir %{_prefix}/lib/systemd/system-preset
%endif
%if %{undefined _modulesloaddir}
%global _modulesloaddir %{_prefix}/lib/modules-load.d
%endif
%if %{undefined _systemdgeneratordir}
%global _systemdgeneratordir %{_prefix}/lib/systemd/system-generators
%endif
%if %{undefined _pkgconfigdir}
%global _pkgconfigdir %{_prefix}/%{_lib}/pkgconfig
%endif
%bcond_with debug
%bcond_with debuginfo
%bcond_with asan
%bcond_with ubsan
%bcond_with systemd
%bcond_with pam
%bcond_without pyzfs
# Generic enable switch for systemd
%if %{with systemd}
%define _systemd 1
%endif
# Distros below support systemd
%if 0%{?rhel}%{?fedora}%{?centos}%{?suse_version}
%define _systemd 1
%endif
# When not specified default to distribution provided version.
%if %{undefined __use_python}
%define __python /usr/bin/python3
%define __python_pkg_version 3
%else
%define __python %{__use_python}
%define __python_pkg_version %{__use_python_pkg_version}
%endif
%define __python_sitelib %(%{__python} -Esc "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
Name: @PACKAGE@
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Commands to control the kernel modules and libraries
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
Source0: %{name}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
Requires: libzpool5%{?_isa} = %{version}-%{release}
Requires: libnvpair3%{?_isa} = %{version}-%{release}
Requires: libuutil3%{?_isa} = %{version}-%{release}
Requires: libzfs5%{?_isa} = %{version}-%{release}
Requires: %{name}-kmod = %{version}
Provides: %{name}-kmod-common = %{version}-%{release}
Obsoletes: spl <= %{version}
# zfs-fuse provides the same commands and man pages that OpenZFS does.
# Renaming those on either side would conflict with all available documentation.
Conflicts: zfs-fuse
%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version}
BuildRequires: gcc, make
BuildRequires: zlib-devel
BuildRequires: libuuid-devel
BuildRequires: libblkid-devel
BuildRequires: libudev-devel
BuildRequires: libattr-devel
BuildRequires: openssl-devel
%if 0%{?fedora} || 0%{?rhel} >= 8 || 0%{?centos} >= 8
BuildRequires: libtirpc-devel
%endif
%if (0%{?fedora}%{?suse_version}) || (0%{?rhel} && 0%{?rhel} < 9)
# We don't directly use it, but if this isn't installed, rpmbuild as root can
# crash+corrupt rpmdb
# See issue #12071
BuildRequires: ncompress
%endif
Requires: openssl
%if 0%{?_systemd}
BuildRequires: systemd
%endif
%endif
%if 0%{?_systemd}
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
%endif
# The zpool iostat/status -c scripts call some utilities like lsblk and iostat
Requires: util-linux
Requires: sysstat
%description
This package contains the core ZFS command line utilities.
%package -n libzpool5
Summary: Native ZFS pool library for Linux
Group: System Environment/Kernel
Obsoletes: libzpool2 <= %{version}
Obsoletes: libzpool4 <= %{version}
%description -n libzpool5
This package contains the zpool library, which provides support
for managing zpools.
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libzpool5
%else
%post -n libzpool5 -p /sbin/ldconfig
%postun -n libzpool5 -p /sbin/ldconfig
%endif
%package -n libnvpair3
Summary: Solaris name-value library for Linux
Group: System Environment/Kernel
Obsoletes: libnvpair1 <= %{version}
%description -n libnvpair3
This package contains routines for packing and unpacking name-value
pairs. This functionality is used to portably transport data across
process boundaries, between kernel and user space, and can be used
to write self-describing data structures on disk.
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libnvpair3
%else
%post -n libnvpair3 -p /sbin/ldconfig
%postun -n libnvpair3 -p /sbin/ldconfig
%endif
%package -n libuutil3
Summary: Solaris userland utility library for Linux
Group: System Environment/Kernel
Obsoletes: libuutil1 <= %{version}
%description -n libuutil3
This library provides a variety of compatibility functions for OpenZFS:
* libspl: The Solaris Porting Layer userland library, which provides APIs
that make it possible to run Solaris user code in a Linux environment
with relatively minimal modification.
* libavl: The Adelson-Velskii Landis balanced binary tree manipulation
library.
* libefi: The Extensible Firmware Interface library for GUID disk
partitioning.
* libshare: NFS, SMB, and iSCSI service integration for ZFS.
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libuutil3
%else
%post -n libuutil3 -p /sbin/ldconfig
%postun -n libuutil3 -p /sbin/ldconfig
%endif
# The library version is encoded in the package name. When updating the
# version information, it is important to add an obsoletes line below for
# the previous version of the package.
%package -n libzfs5
Summary: Native ZFS filesystem library for Linux
Group: System Environment/Kernel
Obsoletes: libzfs2 <= %{version}
Obsoletes: libzfs4 <= %{version}
%description -n libzfs5
This package provides support for managing ZFS filesystems.
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libzfs5
%else
%post -n libzfs5 -p /sbin/ldconfig
%postun -n libzfs5 -p /sbin/ldconfig
%endif
%package -n libzfs5-devel
Summary: Development headers
Group: System Environment/Kernel
Requires: libzfs5%{?_isa} = %{version}-%{release}
Requires: libzpool5%{?_isa} = %{version}-%{release}
Requires: libnvpair3%{?_isa} = %{version}-%{release}
Requires: libuutil3%{?_isa} = %{version}-%{release}
Provides: libzpool5-devel = %{version}-%{release}
Provides: libnvpair3-devel = %{version}-%{release}
Provides: libuutil3-devel = %{version}-%{release}
Obsoletes: zfs-devel <= %{version}
Obsoletes: libzfs2-devel <= %{version}
Obsoletes: libzfs4-devel <= %{version}
%description -n libzfs5-devel
This package contains the header files needed for building additional
applications against the ZFS libraries.
%package test
Summary: Test infrastructure
Group: System Environment/Kernel
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: parted
Requires: lsscsi
Requires: mdadm
Requires: bc
Requires: ksh
Requires: fio
Requires: acl
Requires: sudo
Requires: sysstat
Requires: libaio
Requires: python%{__python_pkg_version}
%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version}
BuildRequires: libaio-devel
%endif
AutoReqProv: no
%description test
This package contains test infrastructure and support scripts for
validating the file system.
%package dracut
Summary: Dracut module
Group: System Environment/Kernel
BuildArch: noarch
Requires: %{name} >= %{version}
Requires: dracut
Requires: /usr/bin/awk
Requires: grep
%description dracut
This package contains a dracut module used to construct an initramfs
image which is ZFS aware.
%if %{with pyzfs}
# Enforce `python36-` package prefix for CentOS 7
# since dependencies come from EPEL and are named this way
%package -n python%{__python_pkg_version}-pyzfs
Summary: Python %{python_version} wrapper for libzfs_core
Group: Development/Languages/Python
License: Apache-2.0
BuildArch: noarch
Requires: libzfs5 = %{version}-%{release}
Requires: libnvpair3 = %{version}-%{release}
Requires: libffi
Requires: python%{__python_pkg_version}
%if 0%{?centos} == 7
Requires: python36-cffi
%else
Requires: python%{__python_pkg_version}-cffi
%endif
%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version}
%if 0%{?centos} == 7
BuildRequires: python36-packaging
BuildRequires: python36-devel
BuildRequires: python36-cffi
BuildRequires: python36-setuptools
%else
BuildRequires: python%{__python_pkg_version}-packaging
BuildRequires: python%{__python_pkg_version}-devel
BuildRequires: python%{__python_pkg_version}-cffi
BuildRequires: python%{__python_pkg_version}-setuptools
%endif
BuildRequires: libffi-devel
%endif
%description -n python%{__python_pkg_version}-pyzfs
This package provides a python wrapper for the libzfs_core C library.
%endif
%if 0%{?_initramfs}
%package initramfs
Summary: Initramfs module
Group: System Environment/Kernel
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: initramfs-tools
%description initramfs
This package contains an initramfs module used to construct an initramfs
image which is ZFS aware.
%endif
%if %{with pam}
%package -n pam_zfs_key
Summary: PAM module for encrypted ZFS datasets
%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version}
BuildRequires: pam-devel
%endif
%description -n pam_zfs_key
This package contains the pam_zfs_key PAM module, which provides
support for unlocking datasets on user login.
%endif
%prep
%if %{with debug}
%define debug --enable-debug
%else
%define debug --disable-debug
%endif
%if %{with debuginfo}
%define debuginfo --enable-debuginfo
%else
%define debuginfo --disable-debuginfo
%endif
%if %{with asan}
%define asan --enable-asan
%else
%define asan --disable-asan
%endif
%if %{with ubsan}
%define ubsan --enable-ubsan
%else
%define ubsan --disable-ubsan
%endif
%if 0%{?_systemd}
%define systemd --enable-systemd --with-systemdunitdir=%{_unitdir} --with-systemdpresetdir=%{_presetdir} --with-systemdmodulesloaddir=%{_modulesloaddir} --with-systemdgeneratordir=%{_systemdgeneratordir} --disable-sysvinit
%define systemd_svcs zfs-import-cache.service zfs-import-scan.service zfs-mount.service zfs-share.service zfs-zed.service zfs.target zfs-import.target zfs-volume-wait.service zfs-volumes.target
%else
%define systemd --enable-sysvinit --disable-systemd
%endif
%if %{with pyzfs}
%define pyzfs --enable-pyzfs
%else
%define pyzfs --disable-pyzfs
%endif
%if %{with pam}
%define pam --enable-pam
%else
%define pam --disable-pam
%endif
%setup -q
%build
%configure \
--with-config=user \
--with-udevdir=%{_udevdir} \
--with-udevruledir=%{_udevruledir} \
--with-dracutdir=%{_dracutdir} \
--with-pamconfigsdir=%{_datadir}/pam-configs \
--with-pammoduledir=%{_libdir}/security \
--with-python=%{__python} \
--with-pkgconfigdir=%{_pkgconfigdir} \
--disable-static \
%{debug} \
%{debuginfo} \
%{asan} \
%{ubsan} \
%{systemd} \
%{pam} \
%{pyzfs}
make %{?_smp_mflags}
%install
%{__rm} -rf $RPM_BUILD_ROOT
make install DESTDIR=%{?buildroot}
find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \;
%if 0%{!?__brp_mangle_shebangs:1}
find %{?buildroot}%{_bindir} \
- \( -name arc_summary -or -name arcstat -or -name dbufstat \) \
+ \( -name arc_summary -or -name arcstat -or -name dbufstat \
+ -or -name zilstat \) \
-exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \;
find %{?buildroot}%{_datadir} \
\( -name test-runner.py -or -name zts-report.py \) \
-exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \;
%endif
%post
%if 0%{?_systemd}
%if 0%{?systemd_post:1}
%systemd_post %{systemd_svcs}
%else
if [ "$1" = "1" -o "$1" = "install" ] ; then
# Initial installation
systemctl preset %{systemd_svcs} >/dev/null || true
fi
%endif
%else
if [ -x /sbin/chkconfig ]; then
/sbin/chkconfig --add zfs-import
/sbin/chkconfig --add zfs-load-key
/sbin/chkconfig --add zfs-mount
/sbin/chkconfig --add zfs-share
/sbin/chkconfig --add zfs-zed
fi
%endif
exit 0
# On RHEL/CentOS 7 the static nodes aren't refreshed by default after
# installing a package. This is the default behavior for Fedora.
%posttrans
%if 0%{?rhel} == 7 || 0%{?centos} == 7
systemctl restart kmod-static-nodes
systemctl restart systemd-tmpfiles-setup-dev
udevadm trigger
%endif
%preun
%if 0%{?_systemd}
%if 0%{?systemd_preun:1}
%systemd_preun %{systemd_svcs}
%else
if [ "$1" = "0" -o "$1" = "remove" ] ; then
# Package removal, not upgrade
systemctl --no-reload disable %{systemd_svcs} >/dev/null || true
systemctl stop %{systemd_svcs} >/dev/null || true
fi
%endif
%else
if [ "$1" = "0" -o "$1" = "remove" ] && [ -x /sbin/chkconfig ]; then
/sbin/chkconfig --del zfs-import
/sbin/chkconfig --del zfs-load-key
/sbin/chkconfig --del zfs-mount
/sbin/chkconfig --del zfs-share
/sbin/chkconfig --del zfs-zed
fi
%endif
exit 0
%postun
%if 0%{?_systemd}
%if 0%{?systemd_postun:1}
%systemd_postun %{systemd_svcs}
%else
systemctl --system daemon-reload >/dev/null || true
%endif
%endif
%files
# Core utilities
%{_sbindir}/*
%{_bindir}/raidz_test
%{_sbindir}/zgenhostid
%{_bindir}/zvol_wait
# Optional Python 3 scripts
%{_bindir}/arc_summary
%{_bindir}/arcstat
%{_bindir}/dbufstat
+%{_bindir}/zilstat
# Man pages
%{_mandir}/man1/*
%{_mandir}/man4/*
%{_mandir}/man5/*
%{_mandir}/man7/*
%{_mandir}/man8/*
# Configuration files and scripts
%{_libexecdir}/%{name}
%{_udevdir}/vdev_id
%{_udevdir}/zvol_id
%{_udevdir}/rules.d/*
%{_datadir}/%{name}/compatibility.d
%if ! 0%{?_systemd} || 0%{?_initramfs}
# Files needed for sysvinit and initramfs-tools
%{_sysconfdir}/%{name}/zfs-functions
%config(noreplace) %{_initconfdir}/zfs
%else
%exclude %{_sysconfdir}/%{name}/zfs-functions
%exclude %{_initconfdir}/zfs
%endif
%if 0%{?_systemd}
%{_unitdir}/*
%{_presetdir}/*
%{_modulesloaddir}/*
%{_systemdgeneratordir}/*
%else
%config(noreplace) %{_sysconfdir}/init.d/*
%endif
%config(noreplace) %{_sysconfdir}/%{name}/zed.d/*
%config(noreplace) %{_sysconfdir}/%{name}/zpool.d/*
%config(noreplace) %{_sysconfdir}/%{name}/vdev_id.conf.*.example
%attr(440, root, root) %config(noreplace) %{_sysconfdir}/sudoers.d/*
%config(noreplace) %{_sysconfdir}/bash_completion.d/zfs
%files -n libzpool5
%{_libdir}/libzpool.so.*
%files -n libnvpair3
%{_libdir}/libnvpair.so.*
%files -n libuutil3
%{_libdir}/libuutil.so.*
%files -n libzfs5
%{_libdir}/libzfs*.so.*
%files -n libzfs5-devel
%{_pkgconfigdir}/libzfs.pc
%{_pkgconfigdir}/libzfsbootenv.pc
%{_pkgconfigdir}/libzfs_core.pc
%{_libdir}/*.so
%{_includedir}/*
%doc AUTHORS COPYRIGHT LICENSE NOTICE README.md
%files test
%{_datadir}/%{name}/zfs-tests
%{_datadir}/%{name}/test-runner
%{_datadir}/%{name}/runfiles
%{_datadir}/%{name}/*.sh
%files dracut
%doc contrib/dracut/README.md
%{_dracutdir}/modules.d/*
%if %{with pyzfs}
%files -n python%{__python_pkg_version}-pyzfs
%doc contrib/pyzfs/README
%doc contrib/pyzfs/LICENSE
%defattr(-,root,root,-)
%{__python_sitelib}/libzfs_core/*
%{__python_sitelib}/pyzfs*
%endif
%if 0%{?_initramfs}
%files initramfs
%doc contrib/initramfs/README.md
/usr/share/initramfs-tools/*
%else
# Since we're not building the initramfs package,
# ignore those files.
%exclude /usr/share/initramfs-tools
%endif
%if %{with pam}
%files -n pam_zfs_key
%{_libdir}/security/*
%{_datadir}/pam-configs/*
%endif
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index b9a9e0efcc82..e8443ffabcfa 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -1,971 +1,972 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains all of the common functional tests. When
# adding a new test, consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['dosmode', 'posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
'alloc_class_013_pos']
tags = ['functional', 'alloc_class']
[tests/functional/append]
tests = ['file_append', 'threadsappend_001_pos']
tags = ['functional', 'append']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']
[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
tags = ['functional', 'bootfs']
[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =
[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
'sensitive_none_lookup', 'sensitive_none_delete',
'sensitive_formd_lookup', 'sensitive_formd_delete',
'insensitive_none_lookup', 'insensitive_none_delete',
'insensitive_formd_lookup', 'insensitive_formd_delete',
'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
'tst.snapshot_recursive', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy',
'tst.terminate_by_signal'
]
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/checksum]
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'run_blake3_test',
'filetest_001_pos', 'filetest_002_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos',
'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
'zdb_display_block', 'zdb_label_checksum', 'zdb_object_range_neg',
'zdb_object_range_pos', 'zdb_objset_id', 'zdb_decompress_zstd',
'zdb_recover', 'zdb_recover_2']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
'zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types', 'zfs_diff_encrypted', 'zfs_diff_mangle']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'receive-o-x_props_override',
'receive-o-x_props_aliases',
'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props',
'zfs_receive_-wR-encrypted-mix', 'zfs_receive_corrective',
'zfs_receive_compressed_corrective']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw',
'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
'zfs_unshare_007_pos', 'zfs_unshare_008_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq', 'zfs_wait_getsubopt']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zhack]
tests = ['zhack_label_checksum']
pre =
post =
tags = ['functional', 'cli_root', 'zhack']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_023_neg', 'zpool_create_024_pos',
'zpool_create_encrypted', 'zpool_create_crypt_combos',
'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
'zpool_create_features_009_pos', 'create-o_ashift',
'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates',
'zpool_events_clear_retained']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3', 'zpool_import_errata4',
'import_cachefile_device_added',
'import_cachefile_device_removed',
'import_cachefile_device_replaced',
'import_cachefile_mirror_attached',
'import_cachefile_mirror_detached',
'import_cachefile_paths_changed',
'import_cachefile_shared_device',
'import_devices_missing',
'import_paths_changed',
'import_rewind_config_changed',
'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
'zpool_initialize_fault_export_import_online',
'zpool_initialize_import_export',
'zpool_initialize_offline_export_import_online',
'zpool_initialize_online_offline',
'zpool_initialize_split',
'zpool_initialize_start_and_cancel_neg',
'zpool_initialize_start_and_cancel_pos',
'zpool_initialize_suspend_resume',
'zpool_initialize_unsupported_vdevs',
'zpool_initialize_verify_checksums',
'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
'zpool_split_resilver', 'zpool_split_indirect',
'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos',
'zpool_status_003_pos', 'zpool_status_004_pos',
'zpool_status_005_pos', 'zpool_status_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
'zpool_trim_fault_export_import_online',
'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg', 'zpool_upgrade_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
'zpool_wait_initialize_flag', 'zpool_wait_multiple',
'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
- 'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
+ 'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
+ 'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
'zfs_list_004_neg', 'zfs_list_005_neg', 'zfs_list_007_pos',
'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']
[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']
[tests/functional/cp_files]
tests = ['cp_files_001_pos']
tags = ['functional', 'cp_files']
[tests/functional/crtime]
tests = ['crtime_001_pos' ]
tags = ['functional', 'crtime']
[tests/functional/ctime]
tests = ['ctime_001_pos' ]
tags = ['functional', 'ctime']
[tests/functional/deadman]
tests = ['deadman_ratelimit', 'deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/fallocate]
tests = ['fallocate_punch-hole']
tags = ['functional', 'fallocate']
[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']
[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
'history_004_pos', 'history_005_neg', 'history_006_neg',
'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
pre =
post =
tests = ['hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']
[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
'snapshot_limit']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_write_001_pos', 'mmap_read_001_pos', 'mmap_seek_001_pos', 'mmap_sync_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
'enospc_df', 'enospc_rm']
tags = ['functional', 'no_space']
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
'online_offline_003_neg']
tags = ['functional', 'online_offline']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
'checkpoint_discard_busy', 'checkpoint_discard_many',
'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
'redacted_origin', 'redacted_panic', 'redacted_props', 'redacted_resume',
'redacted_size', 'redacted_volume']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
'redundancy_draid3', 'redundancy_draid_damaged1',
'redundancy_draid_damaged2', 'redundancy_draid_spare1',
'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
'redundancy_raidz3', 'redundancy_stripe']
tags = ['functional', 'redundancy']
timeout = 1200
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
'refreserv_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
'removal_condense_export', 'removal_multiple_indirection',
'removal_nopwrite', 'removal_remap_deadlists',
'removal_resume_export', 'removal_sanity', 'removal_with_add',
'removal_with_create_fs', 'removal_with_dedup',
'removal_with_errors', 'removal_with_export',
'removal_with_ganging', 'removal_with_faulted',
'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
'removal_with_send_recv', 'removal_with_snapshot',
'removal_with_write', 'removal_with_zdb', 'remove_expanded',
'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
'remove_indirect', 'remove_attach_mirror']
tags = ['functional', 'removal']
[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
'attach_resilver', 'detach', 'rebuild_disabled_feature',
'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
'scrub_cancel']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos', 'rsend_025_pos',
'rsend_026_neg', 'rsend_027_pos', 'rsend_028_neg', 'rsend_029_neg',
'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_hierarchy',
'send_encrypted_props', 'send_encrypted_truncated_files',
'send_freeobjects', 'send_realloc_files',
'send_realloc_encrypted_files', 'send_spill_block', 'send_holds',
'send_hole_birth', 'send_mixed_raw', 'send-wR_encrypted_zvol',
'send_partial_dataset', 'send_invalid', 'send_doall',
'send_raw_spill_block', 'send_raw_ashift']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',
'slog_replay_fs_002', 'slog_replay_volume', 'slog_016_pos']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
'snapshot_017_pos', 'snapshot_018_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/stat]
tests = ['stat_001_pos']
tags = ['functional', 'stat']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
'suid_write_to_none', 'suid_write_zil_replay']
tags = ['functional', 'suid']
[tests/functional/trim]
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
'trim_integrity', 'trim_config', 'trim_l2arc']
tags = ['functional', 'trim']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/userquota]
tests = [
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted',
'userspace_send_encrypted']
tags = ['functional', 'userquota']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
'zvol_misc_snapdev', 'zvol_misc_trim', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_stress]
tests = ['zvol_stress']
tags = ['functional', 'zvol', 'zvol_stress']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/l2arc]
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos']
tags = ['functional', 'l2arc']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index 9b32e73afb1e..09dfb5eb1e1d 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -1,192 +1,196 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/posix:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/acl/posix-sa:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix-sa']
[tests/functional/atime:Linux]
tests = ['atime_003_pos', 'root_relatime_on']
tags = ['functional', 'atime']
[tests/functional/chattr:Linux]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/cli_root/zfs:Linux]
tests = ['zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_mount:Linux]
tests = ['zfs_mount_006_pos', 'zfs_mount_008_pos', 'zfs_mount_013_pos',
'zfs_mount_014_neg', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_share:Linux]
tests = ['zfs_share_005_pos', 'zfs_share_007_neg', 'zfs_share_009_neg',
'zfs_share_012_pos', 'zfs_share_013_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_sysfs:Linux]
tests = ['zfeature_set_unsupported', 'zfs_get_unsupported',
'zfs_set_unsupported', 'zfs_sysfs_live', 'zpool_get_unsupported',
'zpool_set_unsupported']
tags = ['functional', 'cli_root', 'zfs_sysfs']
[tests/functional/cli_root/zpool_add:Linux]
tests = ['add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_expand:Linux]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_reopen:Linux]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg', 'zpool_reopen_007_pos']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_split:Linux]
tests = ['zpool_split_wholedisk']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/compression:Linux]
tests = ['compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/devices:Linux]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill']
tags = ['functional', 'events']
+[tests/functional/fadvise:Linux]
+tests = ['fadvise_sequential']
+tags = ['functional', 'fadvise']
+
[tests/functional/fallocate:Linux]
tests = ['fallocate_prealloc', 'fallocate_zero-range']
tags = ['functional', 'fallocate']
[tests/functional/fault:Linux]
tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_spare_001_pos', 'auto_spare_002_pos',
'auto_spare_multiple', 'auto_spare_ashift', 'auto_spare_shared',
'decrypt_fault', 'decompress_fault', 'scrub_after_resilver',
'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]
tests = ['large_dnode_002_pos', 'large_dnode_006_pos', 'large_dnode_008_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/io:Linux]
tests = ['libaio', 'io_uring']
tags = ['functional', 'io']
[tests/functional/largest_pool:Linux]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/mmap:Linux]
tests = ['mmap_libaio_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp:Linux]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history',
'mmp_on_zdb', 'mmp_write_distribution', 'mmp_hostid']
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
tests = ['umount_unlinked_drain']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
tests = ['pam_basic', 'pam_nounmount', 'pam_short_password']
tags = ['functional', 'pam']
[tests/functional/procfs:Linux]
tests = ['procfs_list_basic', 'procfs_list_concurrent_readers',
'procfs_list_stale_read', 'pool_state']
tags = ['functional', 'procfs']
[tests/functional/projectquota:Linux]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg']
tags = ['functional', 'projectquota']
[tests/functional/dos_attributes:Linux]
tests = ['read_dos_attrs_001', 'write_dos_attrs_001']
tags = ['functional', 'dos_attributes']
[tests/functional/rsend:Linux]
tests = ['send_realloc_dnode_size', 'send_encrypted_files']
tags = ['functional', 'rsend']
[tests/functional/simd:Linux]
pre =
post =
tests = ['simd_supported']
tags = ['functional', 'simd']
[tests/functional/snapshot:Linux]
tests = ['snapshot_015_pos', 'snapshot_016_pos']
tags = ['functional', 'snapshot']
[tests/functional/tmpfile:Linux]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos',
'tmpfile_stat_mode']
tags = ['functional', 'tmpfile']
[tests/functional/upgrade:Linux]
tests = ['upgrade_projectquota_001_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace:Linux]
tests = ['user_namespace_001', 'user_namespace_002', 'user_namespace_003',
'user_namespace_004']
tags = ['functional', 'user_namespace']
[tests/functional/userquota:Linux]
tests = ['groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos',
'userquota_013_pos', 'userspace_003_pos']
tags = ['functional', 'userquota']
[tests/functional/zvol/zvol_misc:Linux]
tests = ['zvol_misc_fua']
tags = ['functional', 'zvol', 'zvol_misc']
diff --git a/sys/contrib/openzfs/tests/runfiles/sanity.run b/sys/contrib/openzfs/tests/runfiles/sanity.run
index 7c4667196431..f115f0b578c6 100644
--- a/sys/contrib/openzfs/tests/runfiles/sanity.run
+++ b/sys/contrib/openzfs/tests/runfiles/sanity.run
@@ -1,625 +1,626 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains a subset of functional tests which exercise
# as much functionality as possible while still executing relatively
# quickly. The included tests should take no more than a few seconds
# each to run at most. This provides a convenient way to sanity test a
# change before committing to a full test run which takes several hours.
#
# Approximate run time: 15 minutes
#
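#
# As an illustration (assuming the stock scripts/zfs-tests.sh wrapper and
# its -r option for selecting runfiles; neither is part of this change),
# this file is typically consumed with an invocation along the lines of:
#
#   scripts/zfs-tests.sh -r sanity
#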
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 180
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_003_pos', 'alloc_class_004_pos', 'alloc_class_005_pos',
'alloc_class_006_pos', 'alloc_class_008_pos', 'alloc_class_010_pos',
'alloc_class_011_neg']
tags = ['functional', 'alloc_class']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/bootfs]
tests = ['bootfs_004_neg', 'bootfs_007_pos']
tags = ['functional', 'bootfs']
[tests/functional/cache]
tests = ['cache_004_neg', 'cache_005_neg', 'cache_007_neg', 'cache_010_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'sensitive_none_lookup',
'sensitive_none_delete', 'insensitive_none_lookup',
'insensitive_none_delete', 'mixed_none_lookup', 'mixed_none_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy',
'tst.snapshot_neg', 'tst.snapshot_recursive', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy']
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/cli_root/zdb]
tests = ['zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_encrypted']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_dryrun', 'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_006_neg', 'zfs_destroy_007_neg',
'zfs_destroy_008_pos', 'zfs_destroy_009_pos', 'zfs_destroy_010_pos',
'zfs_destroy_011_pos', 'zfs_destroy_012_pos', 'zfs_destroy_013_neg',
'zfs_destroy_014_pos', 'zfs_destroy_dev_removal',
'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_cliargs', 'zfs_diff_encrypted']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_003_pos', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_003_pos', 'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_encrypted', 'zfs_mount_remount',
'zfs_mount_all_fail', 'zfs_mount_all_mountpoints', 'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'zfs_receive_from_encrypted',
'zfs_receive_to_encrypted', 'zfs_receive_raw',
'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_003_pos', 'zfs_rename_004_neg',
'zfs_rename_005_neg', 'zfs_rename_006_pos', 'zfs_rename_007_pos',
'zfs_rename_008_pos', 'zfs_rename_009_neg', 'zfs_rename_010_neg',
'zfs_rename_011_pos', 'zfs_rename_012_neg', 'zfs_rename_013_pos',
'zfs_rename_encrypted_child', 'zfs_rename_to_encrypted',
'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_encrypted',
'zfs_send_raw']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg',
'zfs_set_003_neg', 'property_alias_001_pos',
'zfs_set_keylocation', 'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_007_neg', 'zfs_unmount_008_neg',
'zfs_unmount_009_pos', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq', 'zfs_wait_getsubopt']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_002_neg']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_007_neg',
'zpool_create_008_pos', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_encrypted',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_follow', 'zpool_events_poolname']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos', 'zpool_export_003_neg']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_003_pos', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_014_pos', 'zpool_import_features_001_pos',
'zpool_import_all_001_pos', 'zpool_import_encrypted']
tags = ['functional', 'cli_root', 'zpool_import']
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_online_offline']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_003_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_props', 'zpool_split_vdevs', 'zpool_split_indirect']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_rate_neg', 'zpool_trim_secure', 'zpool_trim_split',
'zpool_trim_start_and_cancel_neg', 'zpool_trim_start_and_cancel_pos']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_003_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_009_neg']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_no_activity', 'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg',
'zpool_remove_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
- 'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
+ 'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
+ 'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/compression]
tests = ['compress_003_pos','compress_zstd_bswap']
tags = ['functional', 'compression']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/features/large_dnode]
tests = ['large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_004_pos', 'history_005_neg', 'history_007_pos',
'history_009_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
pre =
post =
tests = ['hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/limits]
tests = ['filesystem_count', 'snapshot_count']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_read_001_pos']
tags = ['functional', 'mmap']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/nopwrite]
tests = ['nopwrite_sync', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_conf_change', 'checkpoint_discard_many',
'checkpoint_removal', 'checkpoint_sm_scale', 'checkpoint_twice']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_incrementals',
'redacted_largeblocks', 'redacted_mixed_recsize', 'redacted_negative',
'redacted_origin', 'redacted_props', 'redacted_resume', 'redacted_size']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg']
tags = ['functional', 'raidz']
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_005_pos', 'refreserv_multi_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_sanity', 'removal_with_dedup',
'removal_with_ganging', 'removal_with_faulted']
tags = ['functional', 'removal']
[tests/functional/replacement]
tests = ['rebuild_raidz']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_009_pos', 'rsend_010_pos', 'rsend_011_pos',
'rsend_014_pos', 'rsend_016_neg', 'send-c_verify_contents',
'send-c_volume', 'send-c_zstreamdump', 'send-c_recv_dedup',
'send-L_toggle', 'send_encrypted_hierarchy', 'send_encrypted_props',
'send_encrypted_truncated_files', 'send_freeobjects', 'send_holds',
'send_mixed_raw', 'send-wR_encrypted_zvol', 'send_partial_dataset',
'send_invalid']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_008_neg', 'slog_009_neg', 'slog_010_neg']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'snapshot_001_pos', 'snapshot_002_pos', 'snapshot_003_pos',
'snapshot_004_pos', 'snapshot_005_pos', 'snapshot_006_pos',
'snapshot_007_pos', 'snapshot_008_pos', 'snapshot_009_pos',
'snapshot_010_pos', 'snapshot_011_pos', 'snapshot_012_pos',
'snapshot_013_pos', 'snapshot_014_pos', 'snapshot_017_pos',
'snapshot_018_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_002_pos', 'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
'suid_write_to_none']
tags = ['functional', 'suid']
[tests/functional/append]
tests = ['threadsappend_001_pos']
tags = ['functional', 'threadsappend']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_003_pos', 'vdev_zaps_004_pos',
'vdev_zaps_005_pos', 'vdev_zaps_006_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_013_pos', 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
index 20d1382532bd..1fd54c1dd510 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
@@ -1,48 +1,49 @@
/badsend
/btree_test
/chg_usr_exec
/devname2devid
/dir_rd_update
/draid
+/file_fadvise
/file_append
/file_check
/file_trunc
/file_write
/get_diff
/getversion
/largest_file
/libzfs_input_check
/mkbusy
/mkfile
/mkfiles
/mktree
/mmap_exec
/mmap_libaio
/mmap_seek
/mmap_sync
/mmapwrite
/nvlist_to_lua
/randfree_file
/randwritecomp
/read_dos_attributes
/readmmap
/rename_dir
/rm_lnkcnt_zero_file
/send_doall
/stride_dd
/threadsappend
/user_ns_exec
/write_dos_attributes
/xattrtest
/zed_fd_spill-zedlet
/suid_write_to_file
/cp_files
/ctime
/truncate_test
/ereports
/zfs_diff-socket
/dosmode_readonly_write
/blake3_test
/edonr_test
/skein_test
/sha2_test
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
index 3c8faf5afbbb..c19c870cf698 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
@@ -1,131 +1,134 @@
scripts_zfs_tests_bindir = $(datadir)/$(PACKAGE)/zfs-tests/bin
scripts_zfs_tests_bin_PROGRAMS = %D%/chg_usr_exec
scripts_zfs_tests_bin_PROGRAMS += %D%/cp_files
scripts_zfs_tests_bin_PROGRAMS += %D%/ctime
scripts_zfs_tests_bin_PROGRAMS += %D%/dir_rd_update
scripts_zfs_tests_bin_PROGRAMS += %D%/dosmode_readonly_write
scripts_zfs_tests_bin_PROGRAMS += %D%/get_diff
scripts_zfs_tests_bin_PROGRAMS += %D%/rename_dir
scripts_zfs_tests_bin_PROGRAMS += %D%/suid_write_to_file
scripts_zfs_tests_bin_PROGRAMS += %D%/truncate_test
scripts_zfs_tests_bin_PROGRAMS += %D%/zfs_diff-socket
scripts_zfs_tests_bin_PROGRAMS += %D%/badsend
%C%_badsend_LDADD = \
libzfs_core.la \
libzfs.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/btree_test
%C%_btree_test_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
%C%_btree_test_LDADD = \
libzpool.la \
libzfs_core.la
if WANT_DEVNAME2DEVID
scripts_zfs_tests_bin_PROGRAMS += %D%/devname2devid
%C%_devname2devid_CFLAGS = $(AM_CFLAGS) $(LIBUDEV_CFLAGS)
%C%_devname2devid_LDADD = $(LIBUDEV_LIBS)
endif
scripts_zfs_tests_bin_PROGRAMS += %D%/draid
%C%_draid_CFLAGS = $(AM_CFLAGS) $(ZLIB_CFLAGS)
%C%_draid_LDADD = \
libzpool.la \
libnvpair.la
%C%_draid_LDADD += $(ZLIB_LIBS)
dist_noinst_DATA += %D%/file/file_common.h
scripts_zfs_tests_bin_PROGRAMS += %D%/file_append %D%/file_check %D%/file_trunc %D%/file_write %D%/largest_file %D%/randwritecomp
%C%_file_append_SOURCES = %D%/file/file_append.c
%C%_file_check_SOURCES = %D%/file/file_check.c
%C%_file_trunc_SOURCES = %D%/file/file_trunc.c
%C%_file_write_SOURCES = %D%/file/file_write.c
%C%_largest_file_SOURCES = %D%/file/largest_file.c
%C%_randwritecomp_SOURCES = %D%/file/randwritecomp.c
scripts_zfs_tests_bin_PROGRAMS += %D%/libzfs_input_check
%C%_libzfs_input_check_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/include/os/@ac_system_l@/zfs
%C%_libzfs_input_check_LDADD = \
libzfs_core.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/mkbusy %D%/mkfile %D%/mkfiles %D%/mktree
%C%_mkfile_LDADD = $(LTLIBINTL)
scripts_zfs_tests_bin_PROGRAMS += %D%/mmap_exec %D%/mmap_seek %D%/mmap_sync %D%/mmapwrite %D%/readmmap
%C%_mmapwrite_LDADD = -lpthread
if WANT_MMAP_LIBAIO
scripts_zfs_tests_bin_PROGRAMS += %D%/mmap_libaio
%C%_mmap_libaio_CFLAGS = $(AM_CFLAGS) $(LIBAIO_CFLAGS)
%C%_mmap_libaio_LDADD = $(LIBAIO_LIBS)
endif
scripts_zfs_tests_bin_PROGRAMS += %D%/nvlist_to_lua
%C%_nvlist_to_lua_LDADD = \
libzfs_core.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/rm_lnkcnt_zero_file
%C%_rm_lnkcnt_zero_file_LDADD = -lpthread
scripts_zfs_tests_bin_PROGRAMS += %D%/send_doall
%C%_send_doall_LDADD = \
libzfs_core.la \
libzfs.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/stride_dd
%C%_stride_dd_LDADD = -lrt
scripts_zfs_tests_bin_PROGRAMS += %D%/threadsappend
%C%_threadsappend_LDADD = -lpthread
scripts_zfs_tests_bin_PROGRAMS += %D%/ereports
%C%_ereports_LDADD = \
libnvpair.la \
libzfs.la
scripts_zfs_tests_bin_PROGRAMS += %D%/edonr_test %D%/skein_test \
%D%/sha2_test %D%/blake3_test
%C%_skein_test_SOURCES = %D%/checksum/skein_test.c
%C%_sha2_test_SOURCES = %D%/checksum/sha2_test.c
%C%_edonr_test_SOURCES = %D%/checksum/edonr_test.c
%C%_blake3_test_SOURCES = %D%/checksum/blake3_test.c
%C%_skein_test_LDADD = \
libicp.la \
libspl.la \
libspl_assert.la
%C%_sha2_test_LDADD = $(%C%_skein_test_LDADD)
%C%_edonr_test_LDADD = $(%C%_skein_test_LDADD)
%C%_blake3_test_LDADD = $(%C%_skein_test_LDADD)
if BUILD_LINUX
scripts_zfs_tests_bin_PROGRAMS += %D%/getversion
scripts_zfs_tests_bin_PROGRAMS += %D%/user_ns_exec
scripts_zfs_tests_bin_PROGRAMS += %D%/xattrtest
scripts_zfs_tests_bin_PROGRAMS += %D%/zed_fd_spill-zedlet
dist_noinst_DATA += %D%/linux_dos_attributes/dos_attributes.h
scripts_zfs_tests_bin_PROGRAMS += %D%/read_dos_attributes %D%/write_dos_attributes
%C%_read_dos_attributes_SOURCES = %D%/linux_dos_attributes/read_dos_attributes.c
%C%_write_dos_attributes_SOURCES = %D%/linux_dos_attributes/write_dos_attributes.c
scripts_zfs_tests_bin_PROGRAMS += %D%/randfree_file
%C%_randfree_file_SOURCES = %D%/file/randfree_file.c
+
+scripts_zfs_tests_bin_PROGRAMS += %D%/file_fadvise
+%C%_file_fadvise_SOURCES = %D%/file/file_fadvise.c
endif
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/checksum/blake3_test.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/checksum/blake3_test.c
index d57d0e047f01..648e1faaaeb7 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/checksum/blake3_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/checksum/blake3_test.c
@@ -1,575 +1,575 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/blake3.h>
/*
* set it to a define for debugging
*/
#undef BLAKE3_DEBUG
/*
* C version of:
* https://github.com/BLAKE3-team/BLAKE3/tree/master/test_vectors
*/
typedef struct {
/* input length for this entry */
const int input_len;
/* hash value */
const char *hash;
/* salted hash value */
const char *shash;
} blake3_test_t;
/* BLAKE3 is variable here */
#define TEST_DIGEST_LEN 262
/*
* key for the keyed hashing
*/
static const char *salt = "whats the Elvish word for friend";
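/*
 * Each TestArray entry below pairs an input length with the expected
 * plain and keyed (salted) digests.  Per the upstream BLAKE3 test-vector
 * definition, the input buffer for a given length is the repeating byte
 * pattern 0, 1, ..., 250, 0, 1, ... (byte i is i % 251), and the salt
 * above is the 32-byte key used for the keyed variant.  A minimal sketch
 * of the intended check, assuming the Blake3_Init/Blake3_Update/
 * Blake3_FinalSeek interface from <sys/blake3.h> (names and signatures
 * are an assumption here, not introduced by this change):
 *
 *	BLAKE3_CTX ctx;
 *	uint8_t buf[TEST_DIGEST_LEN / 2];	(131-byte extended output)
 *
 *	Blake3_Init(&ctx);
 *	Blake3_Update(&ctx, input, input_len);
 *	Blake3_FinalSeek(&ctx, 0, buf, sizeof (buf));
 *
 * hex-encode buf and compare it with the entry's hash string; repeat
 * with the keyed initializer and the salt for the shash string.
 */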
static blake3_test_t TestArray[] = {
{
0,
"af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262e0"
"0f03e7b69af26b7faaf09fcd333050338ddfe085b8cc869ca98b206c08243a26f5"
"487789e8f660afe6c99ef9e0c52b92e7393024a80459cf91f476f9ffdbda7001c2"
"2e159b402631f277ca96f2defdf1078282314e763699a31c5363165421cce14d",
"92b2b75604ed3c761f9d6f62392c8a9227ad0ea3f09573e783f1498a4ed60d26b1"
"8171a2f22a4b94822c701f107153dba24918c4bae4d2945c20ece13387627d3b73"
"cbf97b797d5e59948c7ef788f54372df45e45e4293c7dc18c1d41144a9758be589"
"60856be1eabbe22c2653190de560ca3b2ac4aa692a9210694254c371e851bc8f",
},
{
1,
"2d3adedff11b61f14c886e35afa036736dcd87a74d27b5c1510225d0f592e213c3"
"a6cb8bf623e20cdb535f8d1a5ffb86342d9c0b64aca3bce1d31f60adfa137b358a"
"d4d79f97b47c3d5e79f179df87a3b9776ef8325f8329886ba42f07fb138bb502f4"
"081cbcec3195c5871e6c23e2cc97d3c69a613eba131e5f1351f3f1da786545e5",
"6d7878dfff2f485635d39013278ae14f1454b8c0a3a2d34bc1ab38228a80c95b65"
"68c0490609413006fbd428eb3fd14e7756d90f73a4725fad147f7bf70fd61c4e0c"
"f7074885e92b0e3f125978b4154986d4fb202a3f331a3fb6cf349a3a70e49990f9"
"8fe4289761c8602c4e6ab1138d31d3b62218078b2f3ba9a88e1d08d0dd4cea11",
},
{
2,
"7b7015bb92cf0b318037702a6cdd81dee41224f734684c2c122cd6359cb1ee63d8"
"386b22e2ddc05836b7c1bb693d92af006deb5ffbc4c70fb44d0195d0c6f252faac"
"61659ef86523aa16517f87cb5f1340e723756ab65efb2f91964e14391de2a43226"
"3a6faf1d146937b35a33621c12d00be8223a7f1919cec0acd12097ff3ab00ab1",
"5392ddae0e0a69d5f40160462cbd9bd889375082ff224ac9c758802b7a6fd20a9f"
"fbf7efd13e989a6c246f96d3a96b9d279f2c4e63fb0bdff633957acf50ee1a5f65"
"8be144bab0f6f16500dee4aa5967fc2c586d85a04caddec90fffb7633f46a60786"
"024353b9e5cebe277fcd9514217fee2267dcda8f7b31697b7c54fab6a939bf8f",
},
{
3,
"e1be4d7a8ab5560aa4199eea339849ba8e293d55ca0a81006726d184519e647f5b"
"49b82f805a538c68915c1ae8035c900fd1d4b13902920fd05e1450822f36de9454"
"b7e9996de4900c8e723512883f93f4345f8a58bfe64ee38d3ad71ab027765d25cd"
"d0e448328a8e7a683b9a6af8b0af94fa09010d9186890b096a08471e4230a134",
"39e67b76b5a007d4921969779fe666da67b5213b096084ab674742f0d5ec62b9b9"
"142d0fab08e1b161efdbb28d18afc64d8f72160c958e53a950cdecf91c1a1bbab1"
"a9c0f01def762a77e2e8545d4dec241e98a89b6db2e9a5b070fc110caae2622690"
"bd7b76c02ab60750a3ea75426a6bb8803c370ffe465f07fb57def95df772c39f",
},
{
4,
"f30f5ab28fe047904037f77b6da4fea1e27241c5d132638d8bedce9d40494f328f"
"603ba4564453e06cdcee6cbe728a4519bbe6f0d41e8a14b5b225174a566dbfa61b"
"56afb1e452dc08c804f8c3143c9e2cc4a31bb738bf8c1917b55830c6e657972117"
"01dc0b98daa1faeaa6ee9e56ab606ce03a1a881e8f14e87a4acf4646272cfd12",
"7671dde590c95d5ac9616651ff5aa0a27bee5913a348e053b8aa9108917fe07011"
"6c0acff3f0d1fa97ab38d813fd46506089118147d83393019b068a55d646251ecf"
"81105f798d76a10ae413f3d925787d6216a7eb444e510fd56916f1d753a5544ecf"
"0072134a146b2615b42f50c179f56b8fae0788008e3e27c67482349e249cb86a",
},
{
5,
"b40b44dfd97e7a84a996a91af8b85188c66c126940ba7aad2e7ae6b385402aa2eb"
"cfdac6c5d32c31209e1f81a454751280db64942ce395104e1e4eaca62607de1c2c"
"a748251754ea5bbe8c20150e7f47efd57012c63b3c6a6632dc1c7cd15f3e1c9999"
"04037d60fac2eb9397f2adbe458d7f264e64f1e73aa927b30988e2aed2f03620",
"73ac69eecf286894d8102018a6fc729f4b1f4247d3703f69bdc6a5fe3e0c84616a"
"b199d1f2f3e53bffb17f0a2209fe8b4f7d4c7bae59c2bc7d01f1ff94c67588cc6b"
"38fa6024886f2c078bfe09b5d9e6584cd6c521c3bb52f4de7687b37117a2dbbec0"
"d59e92fa9a8cc3240d4432f91757aabcae03e87431dac003e7d73574bfdd8218",
},
{
6,
"06c4e8ffb6872fad96f9aaca5eee1553eb62aed0ad7198cef42e87f6a616c84461"
"1a30c4e4f37fe2fe23c0883cde5cf7059d88b657c7ed2087e3d210925ede716435"
"d6d5d82597a1e52b9553919e804f5656278bd739880692c94bff2824d8e0b48cac"
"1d24682699e4883389dc4f2faa2eb3b4db6e39debd5061ff3609916f3e07529a",
"82d3199d0013035682cc7f2a399d4c212544376a839aa863a0f4c91220ca7a6dc2"
"ffb3aa05f2631f0fa9ac19b6e97eb7e6669e5ec254799350c8b8d189e880780084"
"2a5383c4d907c932f34490aaf00064de8cdb157357bde37c1504d2960034930887"
"603abc5ccb9f5247f79224baff6120a3c622a46d7b1bcaee02c5025460941256",
},
{
7,
"3f8770f387faad08faa9d8414e9f449ac68e6ff0417f673f602a646a891419fe66"
"036ef6e6d1a8f54baa9fed1fc11c77cfb9cff65bae915045027046ebe0c01bf5a9"
"41f3bb0f73791d3fc0b84370f9f30af0cd5b0fc334dd61f70feb60dad785f070fe"
"f1f343ed933b49a5ca0d16a503f599a365a4296739248b28d1a20b0e2cc8975c",
"af0a7ec382aedc0cfd626e49e7628bc7a353a4cb108855541a5651bf64fbb28a7c"
"5035ba0f48a9c73dabb2be0533d02e8fd5d0d5639a18b2803ba6bf527e1d145d5f"
"d6406c437b79bcaad6c7bdf1cf4bd56a893c3eb9510335a7a798548c6753f74617"
"bede88bef924ba4b334f8852476d90b26c5dc4c3668a2519266a562c6c8034a6",
},
{
8,
"2351207d04fc16ade43ccab08600939c7c1fa70a5c0aaca76063d04c3228eaeb72"
"5d6d46ceed8f785ab9f2f9b06acfe398c6699c6129da084cb531177445a682894f"
"9685eaf836999221d17c9a64a3a057000524cd2823986db378b074290a1a9b93a2"
"2e135ed2c14c7e20c6d045cd00b903400374126676ea78874d79f2dd7883cf5c",
"be2f5495c61cba1bb348a34948c004045e3bd4dae8f0fe82bf44d0da245a060048"
"eb5e68ce6dea1eb0229e144f578b3aa7e9f4f85febd135df8525e6fe40c6f0340d"
"13dd09b255ccd5112a94238f2be3c0b5b7ecde06580426a93e0708555a265305ab"
"f86d874e34b4995b788e37a823491f25127a502fe0704baa6bfdf04e76c13276",
},
{
63,
"e9bc37a594daad83be9470df7f7b3798297c3d834ce80ba85d6e207627b7db7b11"
"97012b1e7d9af4d7cb7bdd1f3bb49a90a9b5dec3ea2bbc6eaebce77f4e470cbf46"
"87093b5352f04e4a4570fba233164e6acc36900e35d185886a827f7ea9bdc1e5c3"
"ce88b095a200e62c10c043b3e9bc6cb9b6ac4dfa51794b02ace9f98779040755",
"bb1eb5d4afa793c1ebdd9fb08def6c36d10096986ae0cfe148cd101170ce37aea0"
"5a63d74a840aecd514f654f080e51ac50fd617d22610d91780fe6b07a26b0847ab"
"b38291058c97474ef6ddd190d30fc318185c09ca1589d2024f0a6f16d45f116783"
"77483fa5c005b2a107cb9943e5da634e7046855eaa888663de55d6471371d55d",
},
{
64,
"4eed7141ea4a5cd4b788606bd23f46e212af9cacebacdc7d1f4c6dc7f2511b98fc"
"9cc56cb831ffe33ea8e7e1d1df09b26efd2767670066aa82d023b1dfe8ab1b2b7f"
"bb5b97592d46ffe3e05a6a9b592e2949c74160e4674301bc3f97e04903f8c6cf95"
"b863174c33228924cdef7ae47559b10b294acd660666c4538833582b43f82d74",
"ba8ced36f327700d213f120b1a207a3b8c04330528586f414d09f2f7d9ccb7e682"
"44c26010afc3f762615bbac552a1ca909e67c83e2fd5478cf46b9e811efccc93f7"
"7a21b17a152ebaca1695733fdb086e23cd0eb48c41c034d52523fc21236e5d8c92"
"55306e48d52ba40b4dac24256460d56573d1312319afcf3ed39d72d0bfc69acb",
},
{
65,
"de1e5fa0be70df6d2be8fffd0e99ceaa8eb6e8c93a63f2d8d1c30ecb6b263dee0e"
"16e0a4749d6811dd1d6d1265c29729b1b75a9ac346cf93f0e1d7296dfcfd4313b3"
"a227faaaaf7757cc95b4e87a49be3b8a270a12020233509b1c3632b3485eef309d"
"0abc4a4a696c9decc6e90454b53b000f456a3f10079072baaf7a981653221f2c",
"c0a4edefa2d2accb9277c371ac12fcdbb52988a86edc54f0716e1591b4326e72d5"
"e795f46a596b02d3d4bfb43abad1e5d19211152722ec1f20fef2cd413e3c22f2fc"
"5da3d73041275be6ede3517b3b9f0fc67ade5956a672b8b75d96cb43294b904149"
"7de92637ed3f2439225e683910cb3ae923374449ca788fb0f9bea92731bc26ad",
},
{
127,
"d81293fda863f008c09e92fc382a81f5a0b4a1251cba1634016a0f86a6bd640de3"
"137d477156d1fde56b0cf36f8ef18b44b2d79897bece12227539ac9ae0a5119da4"
"7644d934d26e74dc316145dcb8bb69ac3f2e05c242dd6ee06484fcb0e956dc4435"
"5b452c5e2bbb5e2b66e99f5dd443d0cbcaaafd4beebaed24ae2f8bb672bcef78",
"c64200ae7dfaf35577ac5a9521c47863fb71514a3bcad18819218b818de85818ee"
"7a317aaccc1458f78d6f65f3427ec97d9c0adb0d6dacd4471374b621b7b5f35cd5"
"4663c64dbe0b9e2d95632f84c611313ea5bd90b71ce97b3cf645776f3adc11e27d"
"135cbadb9875c2bf8d3ae6b02f8a0206aba0c35bfe42574011931c9a255ce6dc",
},
{
128,
"f17e570564b26578c33bb7f44643f539624b05df1a76c81f30acd548c44b45efa6"
"9faba091427f9c5c4caa873aa07828651f19c55bad85c47d1368b11c6fd99e47ec"
"ba5820a0325984d74fe3e4058494ca12e3f1d3293d0010a9722f7dee64f71246f7"
"5e9361f44cc8e214a100650db1313ff76a9f93ec6e84edb7add1cb4a95019b0c",
"b04fe15577457267ff3b6f3c947d93be581e7e3a4b018679125eaf86f6a628ecd8"
"6bbe0001f10bda47e6077b735016fca8119da11348d93ca302bbd125bde0db2b50"
"edbe728a620bb9d3e6f706286aedea973425c0b9eedf8a38873544cf91badf49ad"
"92a635a93f71ddfcee1eae536c25d1b270956be16588ef1cfef2f1d15f650bd5",
},
{
129,
"683aaae9f3c5ba37eaaf072aed0f9e30bac0865137bae68b1fde4ca2aebdcb12f9"
"6ffa7b36dd78ba321be7e842d364a62a42e3746681c8bace18a4a8a79649285c71"
"27bf8febf125be9de39586d251f0d41da20980b70d35e3dac0eee59e468a894fa7"
"e6a07129aaad09855f6ad4801512a116ba2b7841e6cfc99ad77594a8f2d181a7",
"d4a64dae6cdccbac1e5287f54f17c5f985105457c1a2ec1878ebd4b57e20d38f1c"
"9db018541eec241b748f87725665b7b1ace3e0065b29c3bcb232c90e37897fa5aa"
"ee7e1e8a2ecfcd9b51463e42238cfdd7fee1aecb3267fa7f2128079176132a412c"
"d8aaf0791276f6b98ff67359bd8652ef3a203976d5ff1cd41885573487bcd683",
},
{
1023,
"10108970eeda3eb932baac1428c7a2163b0e924c9a9e25b35bba72b28f70bd11a1"
"82d27a591b05592b15607500e1e8dd56bc6c7fc063715b7a1d737df5bad3339c56"
"778957d870eb9717b57ea3d9fb68d1b55127bba6a906a4a24bbd5acb2d123a37b2"
"8f9e9a81bbaae360d58f85e5fc9d75f7c370a0cc09b6522d9c8d822f2f28f485",
"c951ecdf03288d0fcc96ee3413563d8a6d3589547f2c2fb36d9786470f1b9d6e89"
"0316d2e6d8b8c25b0a5b2180f94fb1a158ef508c3cde45e2966bd796a696d3e13e"
"fd86259d756387d9becf5c8bf1ce2192b87025152907b6d8cc33d17826d8b7b9bc"
"97e38c3c85108ef09f013e01c229c20a83d9e8efac5b37470da28575fd755a10",
},
{
1024,
"42214739f095a406f3fc83deb889744ac00df831c10daa55189b5d121c855af71c"
"f8107265ecdaf8505b95d8fcec83a98a6a96ea5109d2c179c47a387ffbb404756f"
"6eeae7883b446b70ebb144527c2075ab8ab204c0086bb22b7c93d465efc57f8d91"
"7f0b385c6df265e77003b85102967486ed57db5c5ca170ba441427ed9afa684e",
"75c46f6f3d9eb4f55ecaaee480db732e6c2105546f1e675003687c31719c7ba4a7"
"8bc838c72852d4f49c864acb7adafe2478e824afe51c8919d06168414c265f298a"
"8094b1ad813a9b8614acabac321f24ce61c5a5346eb519520d38ecc43e89b50002"
"36df0597243e4d2493fd626730e2ba17ac4d8824d09d1a4a8f57b8227778e2de",
},
{
1025,
"d00278ae47eb27b34faecf67b4fe263f82d5412916c1ffd97c8cb7fb814b8444f4"
"c4a22b4b399155358a994e52bf255de60035742ec71bd08ac275a1b51cc6bfe332"
"b0ef84b409108cda080e6269ed4b3e2c3f7d722aa4cdc98d16deb554e5627be8f9"
"55c98e1d5f9565a9194cad0c4285f93700062d9595adb992ae68ff12800ab67a",
"357dc55de0c7e382c900fd6e320acc04146be01db6a8ce7210b7189bd664ea6936"
"2396b77fdc0d2634a552970843722066c3c15902ae5097e00ff53f1e116f1cd535"
"2720113a837ab2452cafbde4d54085d9cf5d21ca613071551b25d52e69d6c81123"
"872b6f19cd3bc1333edf0c52b94de23ba772cf82636cff4542540a7738d5b930",
},
{
2048,
"e776b6028c7cd22a4d0ba182a8bf62205d2ef576467e838ed6f2529b85fba24a9a"
"60bf80001410ec9eea6698cd537939fad4749edd484cb541aced55cd9bf54764d0"
"63f23f6f1e32e12958ba5cfeb1bf618ad094266d4fc3c968c2088f677454c288c6"
"7ba0dba337b9d91c7e1ba586dc9a5bc2d5e90c14f53a8863ac75655461cea8f9",
"879cf1fa2ea0e79126cb1063617a05b6ad9d0b696d0d757cf053439f60a99dd101"
"73b961cd574288194b23ece278c330fbb8585485e74967f31352a8183aa782b2b2"
"2f26cdcadb61eed1a5bc144b8198fbb0c13abbf8e3192c145d0a5c21633b0ef860"
"54f42809df823389ee40811a5910dcbd1018af31c3b43aa55201ed4edaac74fe",
},
{
2049,
"5f4d72f40d7a5f82b15ca2b2e44b1de3c2ef86c426c95c1af0b687952256303096"
"de31d71d74103403822a2e0bc1eb193e7aecc9643a76b7bbc0c9f9c52e8783aae9"
"8764ca468962b5c2ec92f0c74eb5448d519713e09413719431c802f948dd5d9042"
"5a4ecdadece9eb178d80f26efccae630734dff63340285adec2aed3b51073ad3",
"9f29700902f7c86e514ddc4df1e3049f258b2472b6dd5267f61bf13983b78dd5f9"
"a88abfefdfa1e00b418971f2b39c64ca621e8eb37fceac57fd0c8fc8e117d43b81"
"447be22d5d8186f8f5919ba6bcc6846bd7d50726c06d245672c2ad4f61702c6464"
"99ee1173daa061ffe15bf45a631e2946d616a4c345822f1151284712f76b2b0e",
},
{
3072,
"b98cb0ff3623be03326b373de6b9095218513e64f1ee2edd2525c7ad1e5cffd29a"
"3f6b0b978d6608335c09dc94ccf682f9951cdfc501bfe47b9c9189a6fc7b404d12"
"0258506341a6d802857322fbd20d3e5dae05b95c88793fa83db1cb08e7d8008d15"
"99b6209d78336e24839724c191b2a52a80448306e0daa84a3fdb566661a37e11",
"044a0e7b172a312dc02a4c9a818c036ffa2776368d7f528268d2e6b5df19177022"
"f302d0529e4174cc507c463671217975e81dab02b8fdeb0d7ccc7568dd22574c78"
"3a76be215441b32e91b9a904be8ea81f7a0afd14bad8ee7c8efc305ace5d3dd61b"
"996febe8da4f56ca0919359a7533216e2999fc87ff7d8f176fbecb3d6f34278b",
},
{
3073,
"7124b49501012f81cc7f11ca069ec9226cecb8a2c850cfe644e327d22d3e1cd39a"
"27ae3b79d68d89da9bf25bc27139ae65a324918a5f9b7828181e52cf373c84f35b"
"639b7fccbb985b6f2fa56aea0c18f531203497b8bbd3a07ceb5926f1cab74d14bd"
"66486d9a91eba99059a98bd1cd25876b2af5a76c3e9eed554ed72ea952b603bf",
"68dede9bef00ba89e43f31a6825f4cf433389fedae75c04ee9f0cf16a427c95a96"
"d6da3fe985054d3478865be9a092250839a697bbda74e279e8a9e69f0025e4cfdd"
"d6cfb434b1cd9543aaf97c635d1b451a4386041e4bb100f5e45407cbbc24fa53ea"
"2de3536ccb329e4eb9466ec37093a42cf62b82903c696a93a50b702c80f3c3c5",
},
{
4096,
"015094013f57a5277b59d8475c0501042c0b642e531b0a1c8f58d2163229e96902"
"89e9409ddb1b99768eafe1623da896faf7e1114bebeadc1be30829b6f8af707d85"
"c298f4f0ff4d9438aef948335612ae921e76d411c3a9111df62d27eaf871959ae0"
"062b5492a0feb98ef3ed4af277f5395172dbe5c311918ea0074ce0036454f620",
"befc660aea2f1718884cd8deb9902811d332f4fc4a38cf7c7300d597a081bfc0bb"
"b64a36edb564e01e4b4aaf3b060092a6b838bea44afebd2deb8298fa562b7b597c"
"757b9df4c911c3ca462e2ac89e9a787357aaf74c3b56d5c07bc93ce899568a3eb1"
"7d9250c20f6c5f6c1e792ec9a2dcb715398d5a6ec6d5c54f586a00403a1af1de",
},
{
4097,
"9b4052b38f1c5fc8b1f9ff7ac7b27cd242487b3d890d15c96a1c25b8aa0fb99505"
"f91b0b5600a11251652eacfa9497b31cd3c409ce2e45cfe6c0a016967316c426bd"
"26f619eab5d70af9a418b845c608840390f361630bd497b1ab44019316357c61db"
"e091ce72fc16dc340ac3d6e009e050b3adac4b5b2c92e722cffdc46501531956",
"00df940cd36bb9fa7cbbc3556744e0dbc8191401afe70520ba292ee3ca80abbc60"
"6db4976cfdd266ae0abf667d9481831ff12e0caa268e7d3e57260c0824115a54ce"
"595ccc897786d9dcbf495599cfd90157186a46ec800a6763f1c59e36197e9939e9"
"00809f7077c102f888caaf864b253bc41eea812656d46742e4ea42769f89b83f",
},
{
5120,
"9cadc15fed8b5d854562b26a9536d9707cadeda9b143978f319ab34230535833ac"
"c61c8fdc114a2010ce8038c853e121e1544985133fccdd0a2d507e8e615e611e9a"
"0ba4f47915f49e53d721816a9198e8b30f12d20ec3689989175f1bf7a300eee0d9"
"321fad8da232ece6efb8e9fd81b42ad161f6b9550a069e66b11b40487a5f5059",
"2c493e48e9b9bf31e0553a22b23503c0a3388f035cece68eb438d22fa1943e209b"
"4dc9209cd80ce7c1f7c9a744658e7e288465717ae6e56d5463d4f80cdb2ef56495"
"f6a4f5487f69749af0c34c2cdfa857f3056bf8d807336a14d7b89bf62bef2fb54f"
"9af6a546f818dc1e98b9e07f8a5834da50fa28fb5874af91bf06020d1bf0120e",
},
{
5121,
"628bd2cb2004694adaab7bbd778a25df25c47b9d4155a55f8fbd79f2fe154cff96"
"adaab0613a6146cdaabe498c3a94e529d3fc1da2bd08edf54ed64d40dcd6777647"
"eac51d8277d70219a9694334a68bc8f0f23e20b0ff70ada6f844542dfa32cd4204"
"ca1846ef76d811cdb296f65e260227f477aa7aa008bac878f72257484f2b6c95",
"6ccf1c34753e7a044db80798ecd0782a8f76f33563accaddbfbb2e0ea4b2d0240d"
"07e63f13667a8d1490e5e04f13eb617aea16a8c8a5aaed1ef6fbde1b0515e3c810"
"50b361af6ead126032998290b563e3caddeaebfab592e155f2e161fb7cba939092"
"133f23f9e65245e58ec23457b78a2e8a125588aad6e07d7f11a85b88d375b72d",
},
{
6144,
"3e2e5b74e048f3add6d21faab3f83aa44d3b2278afb83b80b3c35164ebeca2054d"
"742022da6fdda444ebc384b04a54c3ac5839b49da7d39f6d8a9db03deab32aade1"
"56c1c0311e9b3435cde0ddba0dce7b26a376cad121294b689193508dd63151603c"
"6ddb866ad16c2ee41585d1633a2cea093bea714f4c5d6b903522045b20395c83",
"3d6b6d21281d0ade5b2b016ae4034c5dec10ca7e475f90f76eac7138e9bc8f1dc3"
"5754060091dc5caf3efabe0603c60f45e415bb3407db67e6beb3d11cf8e4f79075"
"61f05dace0c15807f4b5f389c841eb114d81a82c02a00b57206b1d11fa6e803486"
"b048a5ce87105a686dee041207e095323dfe172df73deb8c9532066d88f9da7e",
},
{
6145,
"f1323a8631446cc50536a9f705ee5cb619424d46887f3c376c695b70e0f0507f18"
"a2cfdd73c6e39dd75ce7c1c6e3ef238fd54465f053b25d21044ccb2093beb01501"
"5532b108313b5829c3621ce324b8e14229091b7c93f32db2e4e63126a377d2a63a"
"3597997d4f1cba59309cb4af240ba70cebff9a23d5e3ff0cdae2cfd54e070022",
"9ac301e9e39e45e3250a7e3b3df701aa0fb6889fbd80eeecf28dbc6300fbc539f3"
"c184ca2f59780e27a576c1d1fb9772e99fd17881d02ac7dfd39675aca918453283"
"ed8c3169085ef4a466b91c1649cc341dfdee60e32231fc34c9c4e0b9a2ba87ca8f"
"372589c744c15fd6f985eec15e98136f25beeb4b13c4e43dc84abcc79cd4646c",
},
{
7168,
"61da957ec2499a95d6b8023e2b0e604ec7f6b50e80a9678b89d2628e99ada77a57"
"07c321c83361793b9af62a40f43b523df1c8633cecb4cd14d00bdc79c78fca5165"
"b863893f6d38b02ff7236c5a9a8ad2dba87d24c547cab046c29fc5bc1ed142e1de"
"4763613bb162a5a538e6ef05ed05199d751f9eb58d332791b8d73fb74e4fce95",
"b42835e40e9d4a7f42ad8cc04f85a963a76e18198377ed84adddeaecacc6f3fca2"
"f01d5277d69bb681c70fa8d36094f73ec06e452c80d2ff2257ed82e7ba34840098"
"9a65ee8daa7094ae0933e3d2210ac6395c4af24f91c2b590ef87d7788d7066ea3e"
"aebca4c08a4f14b9a27644f99084c3543711b64a070b94f2c9d1d8a90d035d52",
},
{
7169,
"a003fc7a51754a9b3c7fae0367ab3d782dccf28855a03d435f8cfe74605e781798"
"a8b20534be1ca9eb2ae2df3fae2ea60e48c6fb0b850b1385b5de0fe460dbe9d9f9"
"b0d8db4435da75c601156df9d047f4ede008732eb17adc05d96180f8a735485228"
"40779e6062d643b79478a6e8dbce68927f36ebf676ffa7d72d5f68f050b119c8",
"ed9b1a922c046fdb3d423ae34e143b05ca1bf28b710432857bf738bcedbfa5113c"
"9e28d72fcbfc020814ce3f5d4fc867f01c8f5b6caf305b3ea8a8ba2da3ab69fabc"
"b438f19ff11f5378ad4484d75c478de425fb8e6ee809b54eec9bdb184315dc8566"
"17c09f5340451bf42fd3270a7b0b6566169f242e533777604c118a6358250f54",
},
{
8192,
"aae792484c8efe4f19e2ca7d371d8c467ffb10748d8a5a1ae579948f718a2a635f"
"e51a27db045a567c1ad51be5aa34c01c6651c4d9b5b5ac5d0fd58cf18dd61a4777"
"8566b797a8c67df7b1d60b97b19288d2d877bb2df417ace009dcb0241ca1257d62"
"712b6a4043b4ff33f690d849da91ea3bf711ed583cb7b7a7da2839ba71309bbf",
"dc9637c8845a770b4cbf76b8daec0eebf7dc2eac11498517f08d44c8fc00d58a48"
"34464159dcbc12a0ba0c6d6eb41bac0ed6585cabfe0aca36a375e6c5480c22afdc"
"40785c170f5a6b8a1107dbee282318d00d915ac9ed1143ad40765ec120042ee121"
"cd2baa36250c618adaf9e27260fda2f94dea8fb6f08c04f8f10c78292aa46102",
},
{
8193,
"bab6c09cb8ce8cf459261398d2e7aef35700bf488116ceb94a36d0f5f1b7bc3bb2"
"282aa69be089359ea1154b9a9286c4a56af4de975a9aa4a5c497654914d279bea6"
"0bb6d2cf7225a2fa0ff5ef56bbe4b149f3ed15860f78b4e2ad04e158e375c1e0c0"
"b551cd7dfc82f1b155c11b6b3ed51ec9edb30d133653bb5709d1dbd55f4e1ff6",
"954a2a75420c8d6547e3ba5b98d963e6fa6491addc8c023189cc519821b4a1f5f0"
"3228648fd983aef045c2fa8290934b0866b615f585149587dda229903996532883"
"5a2b18f1d63b7e300fc76ff260b571839fe44876a4eae66cbac8c67694411ed7e0"
"9df51068a22c6e67d6d3dd2cca8ff12e3275384006c80f4db68023f24eebba57",
},
{
16384,
"f875d6646de28985646f34ee13be9a576fd515f76b5b0a26bb324735041ddde49d"
"764c270176e53e97bdffa58d549073f2c660be0e81293767ed4e4929f9ad34bbb3"
"9a529334c57c4a381ffd2a6d4bfdbf1482651b172aa883cc13408fa67758a3e475"
"03f93f87720a3177325f7823251b85275f64636a8f1d599c2e49722f42e93893",
"9e9fc4eb7cf081ea7c47d1807790ed211bfec56aa25bb7037784c13c4b707b0df9"
"e601b101e4cf63a404dfe50f2e1865bb12edc8fca166579ce0c70dba5a5c0fc960"
"ad6f3772183416a00bd29d4c6e651ea7620bb100c9449858bf14e1ddc9ecd35725"
"581ca5b9160de04060045993d972571c3e8f71e9d0496bfa744656861b169d65",
},
{
31744,
"62b6960e1a44bcc1eb1a611a8d6235b6b4b78f32e7abc4fb4c6cdcce94895c4786"
"0cc51f2b0c28a7b77304bd55fe73af663c02d3f52ea053ba43431ca5bab7bfea2f"
"5e9d7121770d88f70ae9649ea713087d1914f7f312147e247f87eb2d4ffef0ac97"
"8bf7b6579d57d533355aa20b8b77b13fd09748728a5cc327a8ec470f4013226f",
"efa53b389ab67c593dba624d898d0f7353ab99e4ac9d42302ee64cbf9939a4193a"
"7258db2d9cd32a7a3ecfce46144114b15c2fcb68a618a976bd74515d47be08b628"
"be420b5e830fade7c080e351a076fbc38641ad80c736c8a18fe3c66ce12f95c61c"
"2462a9770d60d0f77115bbcd3782b593016a4e728d4c06cee4505cb0c08a42ec",
},
{
102400,
"bc3e3d41a1146b069abffad3c0d44860cf664390afce4d9661f7902e7943e085e0"
"1c59dab908c04c3342b816941a26d69c2605ebee5ec5291cc55e15b76146e6745f"
"0601156c3596cb75065a9c57f35585a52e1ac70f69131c23d611ce11ee4ab1ec2c"
"009012d236648e77be9295dd0426f29b764d65de58eb7d01dd42248204f45f8e",
"1c35d1a5811083fd7119f5d5d1ba027b4d01c0c6c49fb6ff2cf75393ea5db4a7f9"
"dbdd3e1d81dcbca3ba241bb18760f207710b751846faaeb9dff8262710999a59b2"
"aa1aca298a032d94eacfadf1aa192418eb54808db23b56e34213266aa08499a16b"
"354f018fc4967d05f8b9d2ad87a7278337be9693fc638a3bfdbe314574ee6fc4",
},
{
0, 0, 0
}
};
#ifdef BLAKE3_DEBUG
#define dprintf printf
#else
#define dprintf(...)
#endif
static char fmt_tohex(char c);
static size_t fmt_hexdump(char *dest, const char *src, size_t len);
static char fmt_tohex(char c) {
return ((char)(c >= 10 ? c-10+'a' : c+'0'));
}
static size_t fmt_hexdump(char *dest, const char *src, size_t len) {
register const unsigned char *s = (const unsigned char *) src;
size_t written = 0, i;
if (!dest)
return ((len > ((size_t)-1)/2) ? (size_t)-1 : len*2);
for (i = 0; i < len; ++i) {
dest[written] = fmt_tohex(s[i]>>4);
dest[written+1] = fmt_tohex(s[i]&15);
written += 2;
}
return (written);
}
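/*
 * fmt_hexdump() writes two lowercase hex digits per input byte (for
 * example, the bytes 0x0f 0xa0 become "0fa0"); when dest is NULL it
 * only reports the required output length (2 * len), guarding against
 * size_t overflow.
 */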
int
main(int argc, char *argv[])
{
boolean_t failed = B_FALSE;
uint8_t buffer[102400];
uint64_t cpu_mhz = 0;
int id, i, j;
if (argc == 2)
cpu_mhz = atoi(argv[1]);
/* fill test message */
for (i = 0, j = 0; i < sizeof (buffer); i++, j++) {
if (j == 251)
j = 0;
buffer[i] = (uint8_t)j;
}
(void) printf("Running algorithm correctness tests:\n");
- for (id = 0; id < blake3_get_impl_count(); id++) {
- blake3_set_impl_id(id);
- const char *name = blake3_get_impl_name();
+ for (id = 0; id < blake3_impl_getcnt(); id++) {
+ blake3_impl_setid(id);
+ const char *name = blake3_impl_getname();
dprintf("Result for BLAKE3-%s:\n", name);
for (i = 0; TestArray[i].hash; i++) {
blake3_test_t *cur = &TestArray[i];
BLAKE3_CTX ctx;
uint8_t digest[TEST_DIGEST_LEN];
char result[TEST_DIGEST_LEN];
/* default hashing */
Blake3_Init(&ctx);
Blake3_Update(&ctx, buffer, cur->input_len);
Blake3_FinalSeek(&ctx, 0, digest, TEST_DIGEST_LEN);
fmt_hexdump(result, (char *)digest, 131);
if (memcmp(result, cur->hash, 131) != 0)
failed = B_TRUE;
dprintf("HASH-res: %s\n", result);
dprintf("HASH-ref: %s\n", cur->hash);
/* salted hashing */
Blake3_InitKeyed(&ctx, (const uint8_t *)salt);
Blake3_Update(&ctx, buffer, cur->input_len);
Blake3_FinalSeek(&ctx, 0, digest, TEST_DIGEST_LEN);
fmt_hexdump(result, (char *)digest, 131);
if (memcmp(result, cur->shash, 131) != 0)
failed = B_TRUE;
dprintf("SHASH-res: %s\n", result);
dprintf("SHASH-ref: %s\n", cur->shash);
printf("BLAKE3-%s Message (inlen=%d)\tResult: %s\n",
name, cur->input_len, failed?"FAILED!":"OK");
}
}
if (failed)
return (1);
#define BLAKE3_PERF_TEST(impl, diglen) \
do { \
BLAKE3_CTX ctx; \
uint8_t digest[diglen / 8]; \
uint8_t block[131072]; \
uint64_t delta; \
double cpb = 0; \
int i; \
struct timeval start, end; \
memset(block, 0, sizeof (block)); \
(void) gettimeofday(&start, NULL); \
Blake3_Init(&ctx); \
for (i = 0; i < 8192; i++) \
Blake3_Update(&ctx, block, sizeof (block)); \
Blake3_Final(&ctx, digest); \
(void) gettimeofday(&end, NULL); \
delta = (end.tv_sec * 1000000llu + end.tv_usec) - \
(start.tv_sec * 1000000llu + start.tv_usec); \
if (cpu_mhz != 0) { \
cpb = (cpu_mhz * 1e6 * ((double)delta / \
1000000)) / (8192 * 128 * 1024); \
} \
(void) printf("BLAKE3-%s %llu us (%.02f CPB)\n", impl, \
(u_longlong_t)delta, cpb); \
} while (0)
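/*
 * Each performance pass hashes 8192 blocks of 128 KiB (1 GiB in total);
 * when a CPU frequency is given on the command line, cycles per byte is
 * derived as cpu_mhz * 1e6 * elapsed_seconds / total_bytes.
 */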
printf("Running performance tests (hashing 1024 MiB of data):\n");
- for (id = 0; id < blake3_get_impl_count(); id++) {
- blake3_set_impl_id(id);
- const char *name = blake3_get_impl_name();
+ for (id = 0; id < blake3_impl_getcnt(); id++) {
+ blake3_impl_setid(id);
+ const char *name = blake3_impl_getname();
BLAKE3_PERF_TEST(name, 256);
}
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/file/file_fadvise.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/file/file_fadvise.c
new file mode 100644
index 000000000000..e1afb6d0a11c
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/file/file_fadvise.c
@@ -0,0 +1,97 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Copyright (c) 2022 by Information2 Software, Inc. All rights reserved.
+ */
+
+#include "file_common.h"
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+
+/*
+ * Call posix_fadvise(2) to apply the given advice (e.g. prefetch) to a file
+ */
+static const char *execname = "file_fadvise";
+
+static void
+usage(void)
+{
+ (void) fprintf(stderr,
+ "usage: %s -f filename -a advise \n", execname);
+}
+
+int
+main(int argc, char *argv[])
+{
+ char *filename = NULL;
+ int advise = 0;
+ int fd, ch;
+ int err = 0;
+
+ while ((ch = getopt(argc, argv, "a:f:")) != EOF) {
+ switch (ch) {
+ case 'a':
+ advise = atoll(optarg);
+ break;
+ case 'f':
+ filename = optarg;
+ break;
+ case '?':
+ (void) printf("unknown arg %c\n", optopt);
+ usage();
+ break;
+ }
+ }
+
+ if (!filename) {
+ (void) printf("Filename not specified (-f <file>)\n");
+ err++;
+ }
+
+ if (advise < POSIX_FADV_NORMAL || advise > POSIX_FADV_NOREUSE) {
+ (void) printf("advise is invalid\n");
+ err++;
+ }
+
+ if (err) {
+ usage(); /* no return */
+ return (1);
+ }
+
+ if ((fd = open(filename, O_RDWR, 0666)) < 0) {
+ perror("open");
+ return (1);
+ }
+
+ posix_fadvise(fd, 0, 0, advise);
+
+ close(fd);
+
+ return (0);
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
index 47357dca57fb..c05b918325b7 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
@@ -1,227 +1,229 @@
#
# Copyright (c) 2016, 2019 by Delphix. All rights reserved.
# These variables are used by zfs-tests.sh to constrain which utilities
# may be used by the suite. The suite will create a directory which is
# the only element of $PATH and create symlinks from that dir to the
# binaries listed below.
#
# Please keep the contents of each variable sorted for ease of reading
# and maintenance.
#
export SYSTEM_FILES_COMMON='awk
basename
bc
bunzip2
bzcat
cat
chgrp
chmod
chown
cksum
cmp
cp
cpio
cut
date
dd
df
diff
dirname
dmesg
du
echo
env
expr
false
file
find
fio
getconf
getent
getfacl
grep
gunzip
gzip
head
hostname
id
iostat
kill
ksh
ldd
ln
ls
mkdir
mknod
mkfifo
mktemp
mount
mv
net
od
openssl
pamtester
pax
pgrep
ping
pkill
printf
ps
python3
readlink
rm
rmdir
rsync
scp
script
sed
seq
setfacl
sh
sleep
sort
ssh
stat
strings
sudo
swapoff
swapon
sync
tail
tar
timeout
touch
tr
true
truncate
umount
uname
uniq
vmstat
wc'
export SYSTEM_FILES_FREEBSD='chflags
compress
diskinfo
fsck
getextattr
gpart
jail
jexec
jls
lsextattr
md5
mdconfig
newfs
pw
rmextattr
setextattr
sha256
showmount
swapctl
sysctl
trim
uncompress'
export SYSTEM_FILES_LINUX='attr
blkid
blkdiscard
blockdev
chattr
exportfs
fallocate
free
getfattr
groupadd
groupdel
groupmod
hostid
losetup
lsattr
lsblk
lscpu
lsmod
lsscsi
md5sum
mkswap
modprobe
mpstat
nsenter
parted
perf
setfattr
sha256sum
udevadm
unshare
useradd
userdel
usermod
flock
logger'
export ZFS_FILES='zdb
zfs
zhack
zinject
zpool
ztest
raidz_test
arc_summary
arcstat
+ zilstat
dbufstat
mount.zfs
zed
zgenhostid
zstream
zfs_ids_to_path
zpool_influxdb'
export ZFSTEST_FILES='badsend
btree_test
chg_usr_exec
devname2devid
dir_rd_update
draid
+ file_fadvise
file_append
file_check
file_trunc
file_write
get_diff
getversion
largest_file
libzfs_input_check
mkbusy
mkfile
mkfiles
mktree
mmap_exec
mmap_libaio
mmap_seek
mmap_sync
mmapwrite
nvlist_to_lua
randfree_file
randwritecomp
readmmap
read_dos_attributes
rename_dir
rm_lnkcnt_zero_file
send_doall
threadsappend
user_ns_exec
write_dos_attributes
xattrtest
stride_dd
zed_fd_spill-zedlet
suid_write_to_file
cp_files
blake3_test
edonr_test
skein_test
sha2_test
ctime
truncate_test
ereports
zfs_diff-socket
dosmode_readonly_write'
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
index 435dcb81c3c3..d163fc7c8ccc 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
@@ -1,3849 +1,3849 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
# Copyright (c) 2012, 2020, Delphix. All rights reserved.
# Copyright (c) 2017, Tim Chase. All rights reserved.
# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
# Copyright (c) 2017, Lawrence Livermore National Security LLC.
# Copyright (c) 2017, Datto Inc. All rights reserved.
# Copyright (c) 2017, Open-E Inc. All rights reserved.
# Copyright (c) 2021, The FreeBSD Foundation.
# Use is subject to license terms.
#
. ${STF_SUITE}/include/tunables.cfg
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
export PATH="$STF_PATH"
fi
#
# Generic dot version comparison function
#
# Returns success when version $1 is greater than or equal to $2.
#
function compare_version_gte
{
[ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
}
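# For example, compare_version_gte "2.1.5" "2.1" succeeds because
# "sort -V" orders "2.1.5" after "2.1".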
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
typeset ver="$1"
[ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
typeset version major minor _
IFS='.' read -r version major minor _ <<<"$ver"
[ -z "$version" ] && version=0
[ -z "$major" ] && major=0
[ -z "$minor" ] && minor=0
echo $((version * 100000 + major * 1000 + minor))
}
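# For example, linux_version "5.10.3" yields 510003, so kernel versions
# can be compared numerically.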
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
function is_linux
{
[ "$UNAME" = "Linux" ]
}
# Determine if this is an illumos test system
#
# Return 0 if platform illumos, 1 if otherwise
function is_illumos
{
[ "$UNAME" = "illumos" ]
}
# Determine if this is a FreeBSD test system
#
# Return 0 if platform FreeBSD, 1 if otherwise
function is_freebsd
{
[ "$UNAME" = "FreeBSD" ]
}
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise
function is_32bit
{
[ $(getconf LONG_BIT) = "32" ]
}
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise
function is_kmemleak
{
is_linux && [ -e /sys/kernel/debug/kmemleak ]
}
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
typeset fstype=$2
[[ -z $fstype ]] && fstype=zfs
typeset out dir name
case $fstype in
zfs)
if [[ "$1" == "/"* ]] ; then
! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
else
! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
fi
;;
ufs|nfs)
if is_freebsd; then
mount -pt $fstype | while read dev dir _t _flags; do
[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
done
else
out=$(df -F $fstype $1 2>/dev/null) || return
dir=${out%%\(*}
dir=${dir%% *}
name=${out##*\(}
name=${name%%\)*}
name=${name%% *}
[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
fi
;;
ext*)
df -t $fstype $1 > /dev/null 2>&1
;;
zvol)
if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
link=$(readlink -f $ZVOL_DEVDIR/$1)
[[ -n "$link" ]] && \
mount | grep -q "^$link" && \
return 0
fi
;;
*)
false
;;
esac
}
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
ismounted $1 $2
}
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
! ismounted $1 $2
}
function default_setup
{
default_setup_noexit "$@"
log_pass
}
function default_setup_no_mountpoint
{
default_setup_noexit "$1" "$2" "$3" "yes"
log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
typeset disklist=$1
typeset container=$2
typeset volume=$3
typeset no_mountpoint=$4
log_note begin default_setup_noexit
if is_global_zone; then
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL $disklist
else
reexport_pool
fi
rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
log_must zfs create $TESTPOOL/$TESTFS
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
fi
if [[ -n $container ]]; then
rm -rf $TESTDIR1 || \
log_unresolved Could not remove $TESTDIR1
mkdir -p $TESTDIR1 || \
log_unresolved Could not create $TESTDIR1
log_must zfs create $TESTPOOL/$TESTCTR
log_must zfs set canmount=off $TESTPOOL/$TESTCTR
log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR1 \
$TESTPOOL/$TESTCTR/$TESTFS1
fi
fi
if [[ -n $volume ]]; then
if is_global_zone ; then
log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
block_device_wait
else
log_must zfs create $TESTPOOL/$TESTVOL
fi
fi
}
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
typeset disklist=$1
default_setup "$disklist" "true"
}
#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "" "true"
}
#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. By default, the snapshot is
# created on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
typeset snap=${2:-$TESTSNAP}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
if snapexists $fs_vol@$snap; then
log_fail "$fs_vol@$snap already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
typeset clone=${2:-$TESTPOOL/$TESTCLONE}
[[ -z $snap ]] && \
log_fail "Snapshot name is undefined."
[[ -z $clone ]] && \
log_fail "Clone name is undefined."
log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. By default, the bookmark is
# created on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
typeset fs_vol=${1:-$TESTFS}
typeset snap=${2:-$TESTSNAP}
typeset bkmark=${3:-$TESTBKMARK}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
if bkmarkexists $fs_vol#$bkmark; then
log_fail "$fs_vol#$bkmark already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
snapexists $fs_vol@$snap || \
log_fail "$fs_vol@$snap must exist."
log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
# of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
typeset recvfs="$1"
typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
typeset snap="$sendfs@snap1"
typeset incr="$sendfs@snap2"
typeset mountpoint="$TESTDIR/create_recv_clone"
typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
datasetexists $recvfs && log_fail "Recv filesystem must not exist."
datasetexists $sendfs && log_fail "Send filesystem must not exist."
log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
log_must zfs snapshot $snap
log_must eval "zfs send $snap | zfs recv -u $recvfs"
log_must mkfile 1m "$mountpoint/data"
log_must zfs snapshot $incr
log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
iflag=fullblock > $sendfile"
log_mustnot eval "zfs recv -su $recvfs < $sendfile"
destroy_dataset "$sendfs" "-r"
log_must rm -f "$sendfile"
if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
log_fail "Error creating temporary $recvfs/%recv clone"
fi
}
function default_mirror_setup
{
default_mirror_setup_noexit $1 $2 $3
log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
# $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
readonly func="default_mirror_setup_noexit"
typeset primary=$1
typeset secondary=$2
[[ -z $primary ]] && \
log_fail "$func: No parameters passed"
[[ -z $secondary ]] && \
log_fail "$func: No secondary partition passed"
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL mirror $@
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
default_cleanup_noexit
log_pass
}
function default_raidz_setup
{
default_raidz_setup_noexit "$*"
log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup_noexit
{
typeset disklist="$*"
disks=(${disklist[*]})
if [[ ${#disks[*]} -lt 2 ]]; then
log_fail "A raid-z requires a minimum of two disks."
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL raidz $disklist
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
function default_cleanup
{
default_cleanup_noexit
log_pass
}
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
function default_cleanup_noexit
{
typeset pool=""
#
# Destroying the pool will also destroy any
# filesystems it contains.
#
if is_global_zone; then
zfs unmount -a > /dev/null 2>&1
ALL_POOLS=$(get_all_pools)
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
while [ ! -z ${ALL_POOLS} ]
do
for pool in ${ALL_POOLS}
do
if safe_to_destroy_pool $pool ;
then
destroy_pool $pool
fi
done
ALL_POOLS=$(get_all_pools)
done
zfs mount -a
else
typeset fs=""
for fs in $(zfs list -H -o name \
| grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
destroy_dataset "$fs" "-Rf"
done
# Cleanup is needed here to avoid leaving garbage directories behind.
for fs in $(zfs list -H -o name); do
[[ $fs == /$ZONE_POOL ]] && continue
[[ -d $fs ]] && log_must rm -rf $fs/*
done
#
# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
# the default value
#
for fs in $(zfs list -H -o name); do
if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
log_must zfs set reservation=none $fs
log_must zfs set recordsize=128K $fs
log_must zfs set mountpoint=/$fs $fs
typeset enc=$(get_prop encryption $fs)
if [ -z "$enc" ] || [ "$enc" = "off" ]; then
log_must zfs set checksum=on $fs
fi
log_must zfs set compression=off $fs
log_must zfs set atime=on $fs
log_must zfs set devices=off $fs
log_must zfs set exec=on $fs
log_must zfs set setuid=on $fs
log_must zfs set readonly=off $fs
log_must zfs set snapdir=hidden $fs
log_must zfs set aclmode=groupmask $fs
log_must zfs set aclinherit=secure $fs
fi
done
fi
[[ -d $TESTDIR ]] && \
log_must rm -rf $TESTDIR
disk1=${DISKS%% *}
if is_mpath_device $disk1; then
delete_partitions
fi
rm -f $TEST_BASE_DIR/{err,out}
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
if ! is_global_zone; then
reexport_pool
fi
ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
[[ -e $TESTDIR1 ]] && \
log_must rm -rf $TESTDIR1
default_cleanup
}
#
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if ! snapexists $snap; then
log_fail "'$snap' does not exist."
fi
#
# The value returned by 'get_prop' may not match the real mountpoint
# when the snapshot is unmounted, so first check that this snapshot
# is mounted on the current system.
#
typeset mtpt=""
if ismounted $snap; then
mtpt=$(get_prop mountpoint $snap)
fi
destroy_dataset "$snap"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to clean up a clone.
#
# $1 clone name
#
function destroy_clone
{
typeset clone=${1:-$TESTPOOL/$TESTCLONE}
if ! datasetexists $clone; then
log_fail "'$clone' does not existed."
fi
# For the same reason as in destroy_snapshot
typeset mtpt=""
if ismounted $clone; then
mtpt=$(get_prop mountpoint $clone)
fi
destroy_dataset "$clone"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to clean up a bookmark of a file system or volume.
# Defaults to deleting the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
if ! bkmarkexists $bkmark; then
log_fail "'$bkmarkp' does not existed."
fi
destroy_dataset "$bkmark"
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
function snapexists
{
zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
#
# Return 0 if a hold exists; $? otherwise
#
# $1 - hold tag
# $2 - snapshot name
#
function holdexists
{
! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
# $1 dataset whose property is being set
# $2 property to set
# $3 value to set property to
# @return:
# 0 if the property could be set.
# non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
typeset fn=dataset_setprop
if (($# < 3)); then
log_note "$fn: Insufficient parameters (need 3, had $#)"
return 1
fi
typeset output=
output=$(zfs set $2=$3 $1 2>&1)
typeset rv=$?
if ((rv != 0)); then
log_note "Setting property on $1 failed."
log_note "property $2=$3"
log_note "Return Code: $rv"
log_note "Output: $output"
return $rv
fi
return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
(($@)) || log_fail "$@"
}
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
typeset diskname=$1
typeset i
if is_freebsd; then
gpart destroy -F $diskname
elif is_linux; then
DSK=$DEV_DSKDIR/$diskname
DSK=$(echo $DSK | sed -e "s|//|/|g")
log_must parted $DSK -s -- mklabel gpt
blockdev --rereadpt $DSK 2>/dev/null
block_device_wait
else
for i in 0 1 3 4 5 6 7
do
log_must set_partition $i "" 0mb $diskname
done
fi
return 0
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
# arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
function set_partition
{
typeset -i slicenum=$1
typeset start=$2
typeset size=$3
typeset disk=${4#$DEV_DSKDIR/}
disk=${disk#$DEV_RDSKDIR/}
case "$UNAME" in
Linux)
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
disk=$DEV_DSKDIR/$disk
typeset size_mb=${size%%[mMgG]}
size_mb=${size_mb%%[mMgG][bB]}
if [[ ${size:1:1} == 'g' ]]; then
((size_mb = size_mb * 1024))
fi
# Create GPT partition table when setting slice 0 or
# when the device doesn't already contain a GPT label.
parted $disk -s -- print 1 >/dev/null
typeset ret_val=$?
if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
if ! parted $disk -s -- mklabel gpt; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
# When no start is given align on the first cylinder.
if [[ -z "$start" ]]; then
start=1
fi
# Determine the cylinder size for the device and using
# that calculate the end offset in cylinders.
typeset -i cly_size_kb=0
cly_size_kb=$(parted -m $disk -s -- unit cyl print |
awk -F '[:k.]' 'NR == 3 {print $4}')
((end = (size_mb * 1024 / cly_size_kb) + start))
parted $disk -s -- \
mkpart part$slicenum ${start}cyl ${end}cyl
typeset ret_val=$?
if [[ $ret_val -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
blockdev --rereadpt $disk 2>/dev/null
block_device_wait $disk
;;
FreeBSD)
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
disk=$DEV_DSKDIR/$disk
if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
gpart destroy -F $disk >/dev/null 2>&1
if ! gpart create -s GPT $disk; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
typeset index=$((slicenum + 1))
if [[ -n $start ]]; then
start="-b $start"
fi
gpart add -t freebsd-zfs $start -s $size -i $index $disk
typeset ret_val=$?
if [[ $ret_val -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
block_device_wait $disk
;;
*)
if [[ -z $slicenum || -z $size || -z $disk ]]; then
log_fail "The slice, size or disk name is unspecified."
fi
typeset format_file=/var/tmp/format_in.$$
echo "partition" >$format_file
echo "$slicenum" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "$start" >> $format_file
echo "$size" >> $format_file
echo "label" >> $format_file
echo "" >> $format_file
echo "q" >> $format_file
echo "q" >> $format_file
format -e -s -d $disk -f $format_file
typeset ret_val=$?
rm -f $format_file
;;
esac
if [[ $ret_val -ne 0 ]]; then
log_note "Unable to format $disk slice $slicenum to $size"
return 1
fi
return 0
}
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
function delete_partitions
{
typeset disk
if [[ -z $DISKSARRAY ]]; then
DISKSARRAY=$DISKS
fi
if is_linux; then
typeset -i part
for disk in $DISKSARRAY; do
for (( part = 1; part < MAX_PARTITIONS; part++ )); do
typeset partition=${disk}${SLICE_PREFIX}${part}
parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
if lsblk | grep -qF ${partition}; then
log_fail "Partition ${partition} not deleted"
else
log_note "Partition ${partition} deleted"
fi
done
done
elif is_freebsd; then
for disk in $DISKSARRAY; do
if gpart destroy -F $disk; then
log_note "Partitions for ${disk} deleted"
else
log_fail "Partitions for ${disk} not deleted"
fi
done
fi
}
#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
typeset disk=$1
typeset slice=$2
if [[ -z $disk || -z $slice ]] ; then
log_fail "The disk name or slice number is unspecified."
fi
case "$UNAME" in
Linux)
endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
((endcyl = (endcyl + 1)))
;;
FreeBSD)
disk=${disk#/dev/zvol/}
disk=${disk%p*}
slice=$((slice + 1))
endcyl=$(gpart show $disk | \
awk -v slice=$slice '$3 == slice { print $1 + $2 }')
;;
*)
disk=${disk#/dev/dsk/}
disk=${disk#/dev/rdsk/}
disk=${disk%s*}
typeset -i ratio=0
ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
awk '/sectors\/cylinder/ {print $2}')
if ((ratio == 0)); then
return
fi
typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
awk -v token="$slice" '$1 == token {print $6}')
((endcyl = (endcyl + 1) / ratio))
;;
esac
echo $endcyl
}
#
# Given a size,disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
typeset -i i=0
typeset slice_size=$1
typeset disk_name=$2
typeset total_slices=$3
typeset cyl
zero_partitions $disk_name
while ((i < $total_slices)); do
if ! is_linux; then
if ((i == 2)); then
((i = i + 1))
continue
fi
fi
log_must set_partition $i "$cyl" $slice_size $disk_name
cyl=$(get_endslice $disk_name $i)
((i = i+1))
done
}
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
# non 0 on error
#
# Where :
# destdir: is the directory where everything is to be created under
# dirnum: the maximum number of subdirectories to use, -1 no limit
# filenum: the maximum number of files per subdirectory
# bytes: number of bytes to write
# num_writes: number of times to write out bytes
# data: the data that will be written
#
# E.g.
# fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
typeset destdir=${1:-$TESTDIR}
typeset -i dirnum=${2:-50}
typeset -i filenum=${3:-50}
typeset -i bytes=${4:-8192}
typeset -i num_writes=${5:-10240}
typeset data=${6:-0}
mkdir -p $destdir/{1..$dirnum}
for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
file_write -o create -f $f -b $bytes -c $num_writes -d $data \
|| return
done
}
# Get the specified dataset property in parsable format or fail
function get_prop # property dataset
{
typeset prop=$1
typeset dataset=$2
zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
}
# Get the specified pool property in parsable format or fail
function get_pool_prop # property pool
{
typeset prop=$1
typeset pool=$2
zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
function poolexists
{
typeset pool=$1
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
zpool get name "$pool" > /dev/null 2>&1
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n dataset name
function datasetexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
zfs get name "$@" > /dev/null 2>&1
}
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
&& return 1
shift
done
return 0
}
# FreeBSD breaks exports(5) at whitespace and doesn't process escapes
# Solaris just breaks
#
# cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
#
# Linux can have spaces (which are \OOO-escaped),
# but can't have backslashes because they're parsed recursively
function shares_can_have_whitespace
{
is_linux
}
function is_shared_freebsd
{
typeset fs=$1
pgrep -q mountd && showmount -E | grep -qx "$fs"
}
function is_shared_illumos
{
typeset fs=$1
typeset mtpt
for mtpt in `share | awk '{print $2}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
typeset stat=$(svcs -H -o STA nfs/server:default)
if [[ $stat != "ON" ]]; then
log_note "Current nfs/server status: $stat"
fi
return 1
}
function is_shared_linux
{
typeset fs=$1
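# The awk program exits 1 when an exported path matches $fs, so the
# leading '!' turns a match into success; backslashes in $fs are
# escaped so awk treats them literally.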
! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
}
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case "$mtpt" in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
case "$UNAME" in
FreeBSD) is_shared_freebsd "$fs" ;;
Linux) is_shared_linux "$fs" ;;
*) is_shared_illumos "$fs" ;;
esac
}
function is_exported_illumos
{
typeset fs=$1
typeset mtpt _
while read -r mtpt _; do
[ "$mtpt" = "$fs" ] && return
done < /etc/dfs/sharetab
return 1
}
function is_exported_freebsd
{
typeset fs=$1
typeset mtpt _
while read -r mtpt _; do
[ "$mtpt" = "$fs" ] && return
done < /etc/zfs/exports
return 1
}
function is_exported_linux
{
typeset fs=$1
typeset mtpt _
while read -r mtpt _; do
[ "$(printf "$mtpt")" = "$fs" ] && return
done < /etc/exports.d/zfs.exports
return 1
}
#
# Given a mountpoint, or a dataset name, determine if it is exported via
# the os-specific NFS exports file.
#
# Returns 0 if exported, 1 otherwise.
#
function is_exported
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case $mtpt in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
case "$UNAME" in
FreeBSD) is_exported_freebsd "$fs" ;;
Linux) is_exported_linux "$fs" ;;
*) is_exported_illumos "$fs" ;;
esac
}
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
typeset fs=$1
datasetexists "$fs" || return
if is_linux; then
- net usershare list | grep -xFq "${fs//\//_}"
+ net usershare list | grep -xFq "${fs//[-\/]/_}"
else
log_note "SMB on $UNAME currently unsupported by the test framework"
return 1
fi
}
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
! is_shared $1
}
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
! is_shared_smb $1
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
typeset fs=$1
if is_shared $fs || is_shared_smb $fs; then
log_must zfs unshare $fs
fi
}
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
typeset fs=$1
is_shared "$fs" && return
case "$UNAME" in
Linux)
log_must exportfs "*:$fs"
;;
FreeBSD)
typeset mountd
read -r mountd < /var/run/mountd.pid
log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
log_must kill -s HUP "$mountd"
;;
*)
log_must share -F nfs "$fs"
;;
esac
return 0
}
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
typeset fs=$1
! is_shared "$fs" && return
case "$UNAME" in
Linux)
log_must exportfs -u "*:$fs"
;;
FreeBSD)
typeset mountd
read -r mountd < /var/run/mountd.pid
awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
log_must kill -s HUP "$mountd"
;;
*)
log_must unshare -F nfs $fs
;;
esac
return 0
}
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
case "$UNAME" in
Linux)
exportfs -v
;;
FreeBSD)
showmount
;;
*)
share -F nfs
;;
esac
}
function check_nfs
{
case "$UNAME" in
Linux)
exportfs -s
;;
FreeBSD)
showmount -e
;;
*)
log_unsupported "Unknown platform"
;;
esac || log_unsupported "The NFS utilities are not installed"
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
# Cannot share directory in non-global zone.
#
if ! is_global_zone; then
log_note "Cannot trigger NFS server by sharing in LZ."
return
fi
if is_linux; then
#
# Re-synchronize /var/lib/nfs/etab with /etc/exports and
# /etc/exports.d/* to provide a clean test environment.
#
log_must exportfs -r
log_note "NFS server must be started prior to running ZTS."
return
elif is_freebsd; then
log_must kill -s HUP $(</var/run/mountd.pid)
log_note "NFS server must be started prior to running ZTS."
return
fi
typeset nfs_fmri="svc:/network/nfs/server:default"
if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
#
# Only an actual share operation can bring the NFS server
# online permanently.
#
typeset dummy=/tmp/dummy
if [[ -d $dummy ]]; then
log_must rm -rf $dummy
fi
log_must mkdir $dummy
log_must share $dummy
#
# Wait for the FMRI's status to settle into its final state.
# While it is in transition an asterisk (*) is appended to the
# instance status, and an unshare would revert the status to 'DIS'.
#
# Wait for at least 1 second.
#
log_must sleep 1
timeout=10
while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
do
log_must sleep 1
((timeout -= 1))
done
log_must unshare $dummy
log_must rm -rf $dummy
fi
log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
if is_linux || is_freebsd; then
return 0
else
typeset cur_zone=$(zonename 2>/dev/null)
[ $cur_zone = "global" ]
fi
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
typeset limit=$1
[[ -z $limit ]] && return 0
if is_global_zone ; then
case $limit in
global|both)
;;
local) log_unsupported "Test is unable to run from "\
"global zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
else
case $limit in
local|both)
;;
global) log_unsupported "Test is unable to run from "\
"local zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
reexport_pool
fi
return 0
}
# Return 0 if created successfully or the pool already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
typeset pool=${1%%/*}
shift
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 1
fi
if poolexists $pool ; then
destroy_pool $pool
fi
if is_global_zone ; then
[[ -d /$pool ]] && rm -rf /$pool
log_must zpool create -f $pool $@
fi
return 0
}
# Return 0 if destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
function destroy_pool #pool
{
typeset pool=${1%%/*}
typeset mtpt
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
if is_global_zone ; then
if poolexists "$pool" ; then
mtpt=$(get_prop mountpoint "$pool")
# At times, syseventd/udev activity can cause attempts
# to destroy a pool to fail with EBUSY. We retry a few
# times allowing failures before requiring the destroy
# to succeed.
log_must_busy zpool destroy -f $pool
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Pool does not exist. ($pool)"
return 1
fi
fi
return 0
}
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
function create_dataset #dataset dataset_options
{
typeset dataset=$1
shift
if [[ -z $dataset ]]; then
log_note "Missing dataset name."
return 1
fi
if datasetexists $dataset ; then
destroy_dataset $dataset
fi
log_must zfs create $@ $dataset
return 0
}
# Return 0 if destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
function destroy_dataset # dataset [args]
{
typeset dataset=$1
typeset mtpt
typeset args=${2:-""}
if [[ -z $dataset ]]; then
log_note "No dataset name given."
return 1
fi
if is_global_zone ; then
if datasetexists "$dataset" ; then
mtpt=$(get_prop mountpoint "$dataset")
log_must_busy zfs destroy $args $dataset
[ -d $mtpt ] && log_must rm -rf $mtpt
else
log_note "Dataset does not exist. ($dataset)"
return 1
fi
fi
return 0
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
typeset -i cntctr=5
typeset -i i=0
while ((i < cntctr)); do
if ((i == 0)); then
TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
if ! ismounted $TESTPOOL; then
log_must zfs mount $TESTPOOL
fi
else
eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
if eval ! ismounted \$TESTPOOL$i; then
log_must eval zfs mount \$TESTPOOL$i
fi
fi
((i += 1))
done
}
#
# Verify a given disk or pool state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
[[ -z $pool ]] || [[ -z $state ]] \
&& log_fail "Arguments invalid or missing"
if [[ -z $disk ]]; then
#check pool state only
zpool get -H -o value health $pool | grep -qi "$state"
else
zpool status -v $pool | grep "$disk" | grep -qi "$state"
fi
}
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if [[ $dataset != *@* ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
typeset fs=${dataset%@*}
typeset snap=${dataset#*@}
if [[ -z $fs || -z $snap ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
typeset device="$1"
typeset ashift="$2"
zdb -e -lll $device | awk -v ashift=$ashift '
/ashift: / {
if (ashift != $2)
exit 1;
else
count++;
}
END {
exit (count != 4);
}'
}
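# For example, verify_ashift /dev/sda1 12 succeeds only when all four
# labels reported by zdb carry "ashift: 12".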
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
typeset pool="$1"
typeset filesys="$2"
typeset zdbout="/tmp/zdbout.$$"
shift
shift
typeset dirs=$@
typeset search_path=""
log_note "Calling zdb to verify filesystem '$filesys'"
zfs unmount -a > /dev/null 2>&1
log_must zpool export $pool
if [[ -n $dirs ]] ; then
for dir in $dirs ; do
search_path="$search_path -d $dir"
done
fi
log_must zpool import $search_path $pool
if ! zdb -cudi $filesys > $zdbout 2>&1; then
log_note "Output: zdb -cudi $filesys"
cat $zdbout
rm -f $zdbout
log_fail "zdb detected errors with: '$filesys'"
fi
log_must zfs mount -a
log_must rm -rf $zdbout
}
#
# Given a pool issue a scrub and verify that no checksum errors are reported.
#
function verify_pool
{
typeset pool=${1:-$TESTPOOL}
log_must zpool scrub $pool
log_must wait_scrubbed $pool
typeset -i cksum=$(zpool status $pool | awk '
!NF { isvdev = 0 }
isvdev { errors += $NF }
/CKSUM$/ { isvdev = 1 }
END { print errors }
')
if [[ $cksum != 0 ]]; then
log_must zpool status -v
log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
fi
}
#
# Given a pool, list all disks in the pool.
#
function get_disklist # pool
{
echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
}
#
# Given a pool, list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
get_disklist "-P $1"
}
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
typeset -i TIMEOUT=$1
shift
typeset cpids="$@"
log_note "Waiting for child processes($cpids). " \
"It could last dozens of minutes, please be patient ..."
log_must sleep $TIMEOUT
log_note "Killing child processes after ${TIMEOUT} stress timeout."
typeset pid
for pid in $cpids; do
ps -p $pid > /dev/null 2>&1 &&
log_must kill -USR1 $pid
done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "spares")
[ $state = $cur_state ]
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_hotspare_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk)
[ $state = $cur_state ]
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_vdev_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
#
# Check the output of 'zpool status -v <pool>'
# to see if the content of <token> contains the <keyword> specified.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
typeset pool=$1
typeset token=$2
typeset keyword=$3
typeset verbose=${4:-false}
scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
if [[ $verbose == true ]]; then
log_note $scan
fi
echo $scan | grep -qi "$keyword"
}
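# For example, check_pool_status $TESTPOOL "scan" "scrub repaired"
# succeeds when the status output contains a line such as
# "scan: scrub repaired 0B ...".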
#
# The following functions are instances of check_pool_status()
# is_pool_resilvering - to check if the pool resilver is in progress
# is_pool_resilvered - to check if the pool resilver is completed
# is_pool_scrubbing - to check if the pool scrub is in progress
# is_pool_scrubbed - to check if the pool scrub is completed
# is_pool_scrub_stopped - to check if the pool scrub is stopped
# is_pool_scrub_paused - to check if the pool scrub has paused
# is_pool_removing - to check if the pool is removing a vdev
# is_pool_removed - to check if the pool remove is completed
# is_pool_discarding - to check if the pool checkpoint is being discarded
#
function is_pool_resilvering #pool <verbose>
{
check_pool_status "$1" "scan" \
"resilver[ ()0-9A-Za-z:_-]* in progress since" $2
}
function is_pool_resilvered #pool <verbose>
{
check_pool_status "$1" "scan" "resilvered " $2
}
function is_pool_scrubbing #pool <verbose>
{
check_pool_status "$1" "scan" "scrub in progress since " $2
}
function is_pool_scrubbed #pool <verbose>
{
check_pool_status "$1" "scan" "scrub repaired" $2
}
function is_pool_scrub_stopped #pool <verbose>
{
check_pool_status "$1" "scan" "scrub canceled" $2
}
function is_pool_scrub_paused #pool <verbose>
{
check_pool_status "$1" "scan" "scrub paused since " $2
}
function is_pool_removing #pool
{
check_pool_status "$1" "remove" "in progress since "
}
function is_pool_removed #pool
{
check_pool_status "$1" "remove" "completed on"
}
function is_pool_discarding #pool
{
check_pool_status "$1" "checkpoint" "discarding"
}
function wait_for_degraded
{
typeset pool=$1
typeset timeout=${2:-30}
typeset t0=$SECONDS
while :; do
[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
log_note "$pool is not yet degraded."
sleep 1
if ((SECONDS - t0 > $timeout)); then
log_note "$pool not degraded after $timeout seconds."
return 1
fi
done
return 0
}
#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
typeset pool="foopool$$"
for vdev in $@; do
zero_partitions $vdev
done
poolexists $pool && destroy_pool $pool
create_pool $pool $@
destroy_pool $pool
return 0
}
#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
# Trust provided list, no attempt is made to locate unused devices.
if is_linux || is_freebsd; then
echo "$@"
return
fi
sfi=/tmp/swaplist.$$
dmpi=/tmp/dumpdev.$$
max_finddisksnum=${MAX_FINDDISKSNUM:-6}
swap -l > $sfi
dumpadm > $dmpi 2>/dev/null
disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
BEGIN { FS="."; }
/^Specify disk/{
searchdisks=0;
}
{
if (searchdisks && $2 !~ "^$"){
split($2,arr," ");
print arr[1];
}
}
/^AVAILABLE DISK SELECTIONS:/{
searchdisks=1;
}
')}
unused=""
for disk in $disks; do
# Check for mounted
grep -q "${disk}[sp]" /etc/mnttab && continue
# Check for swap
grep -q "${disk}[sp]" $sfi && continue
# check for dump device
grep -q "${disk}[sp]" $dmpi && continue
# check to see if this disk hasn't been explicitly excluded
# by a user-set environment variable
echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
unused_candidates="$unused_candidates $disk"
done
rm $sfi $dmpi
# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
count=0
for disk in $unused_candidates; do
if is_disk_device $DEV_DSKDIR/${disk}s0 && \
[ $count -lt $max_finddisksnum ]; then
unused="$unused $disk"
# do not impose limit if $@ is provided
[[ -z $@ ]] && ((count = count + 1))
fi
done
# finally, return our disk list
echo $unused
}
function add_user_freebsd #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
# Check to see if the user exists.
if id $user > /dev/null 2>&1; then
return 0
fi
# Assign 1000 as the base uid
typeset -i uid=1000
while true; do
pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
case $? in
0) break ;;
# The uid is not unique
65) ((uid += 1)) ;;
*) return 1 ;;
esac
if [[ $uid == 65000 ]]; then
log_fail "No user id available under 65000 for $user"
fi
done
# Silence MOTD
touch $basedir/$user/.hushlogin
return 0
}
#
# Delete the specified user.
#
# $1 login name
#
function del_user_freebsd #<logname>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must pw userdel $user
fi
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group_freebsd #<group_name>
{
typeset group=$1
# See if the group already exists.
if pw groupshow $group >/dev/null 2>&1; then
return 0
fi
# Assign 1000 as the base gid
typeset -i gid=1000
while true; do
pw groupadd -g $gid -n $group > /dev/null 2>&1
case $? in
0) return 0 ;;
# The gid is not unique
65) ((gid += 1)) ;;
*) return 1 ;;
esac
if [[ $gid == 65000 ]]; then
log_fail "No user id available under 65000 for $group"
fi
done
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group_freebsd #<group_name>
{
typeset group=$1
pw groupdel -n $group > /dev/null 2>&1
case $? in
# Group does not exist, or was deleted successfully.
0|6|65) return 0 ;;
# Name already exists as a group name
9) log_must pw groupdel $group ;;
*) return 1 ;;
esac
return 0
}
function add_user_illumos #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
log_must useradd -g $group -d $basedir/$user -m $user
return 0
}
function del_user_illumos #<user_name>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 6 userdel $user
fi
return 0
}
function add_group_illumos #<group_name>
{
typeset group=$1
typeset -i gid=100
while true; do
groupadd -g $gid $group > /dev/null 2>&1
case $? in
0) return 0 ;;
# The gid is not unique
4) ((gid += 1)) ;;
*) return 1 ;;
esac
done
}
function del_group_illumos #<group_name>
{
typeset group=$1
groupmod -n $group $group > /dev/null 2>&1
case $? in
# Group does not exist.
6) return 0 ;;
# Name already exists as a group name
9) log_must groupdel $group ;;
*) return 1 ;;
esac
}
function add_user_linux #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
log_must useradd -g $group -d $basedir/$user -m $user
# Add new users to the same group as the command line utilities.
# This allows them to be run out of the original user's home
# directory as long as it is permissioned to be group readable.
cmd_group=$(stat --format="%G" $(command -v zfs))
log_must usermod -a -G $cmd_group $user
return 0
}
function del_user_linux #<user_name>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 6 userdel $user
fi
}
function add_group_linux #<group_name>
{
typeset group=$1
# Let groupadd select the gid; no explicit base gid is assigned on
# Linux because for many distributions 1000 and under are reserved.
while true; do
groupadd $group > /dev/null 2>&1
case $? in
0) return 0 ;;
*) return 1 ;;
esac
done
}
function del_group_linux #<group_name>
{
typeset group=$1
getent group $group > /dev/null 2>&1
case $? in
# Group does not exist.
2) return 0 ;;
# Name already exists as a group name
0) log_must groupdel $group ;;
*) return 1 ;;
esac
return 0
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=${3:-"/var/tmp"}
if ((${#group} == 0 || ${#user} == 0)); then
log_fail "group name or user name are not defined."
fi
case "$UNAME" in
FreeBSD)
add_user_freebsd "$group" "$user" "$basedir"
;;
Linux)
add_user_linux "$group" "$user" "$basedir"
;;
*)
add_user_illumos "$group" "$user" "$basedir"
;;
esac
return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
typeset user=$1
typeset basedir=${2:-"/var/tmp"}
if ((${#user} == 0)); then
log_fail "login name is necessary."
fi
case "$UNAME" in
FreeBSD)
del_user_freebsd "$user"
;;
Linux)
del_user_linux "$user"
;;
*)
del_user_illumos "$user"
;;
esac
[[ -d $basedir/$user ]] && rm -fr $basedir/$user
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
case "$UNAME" in
FreeBSD)
add_group_freebsd "$group"
;;
Linux)
add_group_linux "$group"
;;
*)
add_group_illumos "$group"
;;
esac
return 0
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
case "$UNAME" in
FreeBSD)
del_group_freebsd "$group"
;;
Linux)
del_group_linux "$group"
;;
*)
del_group_illumos "$group"
;;
esac
return 0
}
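#
# Example usage (illustrative; the group and user names are arbitrary):
#
#	log_must add_group zfsgrp
#	log_must add_user zfsgrp zfsuser
#	# ... run the test as zfsuser ...
#	log_must del_user zfsuser
#	log_must del_group zfsgrp
#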
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name
typeset pool=""
typeset DONT_DESTROY=""
# We check that by deleting the $1 pool, we're not
# going to pull the rug out from other pools. Do this
# by looking at all other pools, ensuring that they
# aren't built from files or zvols contained in this pool.
for pool in $(zpool list -H -o name)
do
ALTMOUNTPOOL=""
# this is a list of the top-level directories in each of the
# files that make up the path to the files the pool is based on
FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
# this is a list of the zvols that make up the pool
ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
# also want to determine if it's a file-based pool using an
# alternate mountpoint...
POOL_FILE_DIRS=$(zpool status -v $pool | \
awk '/\// {print $1}' | \
awk -F/ '!/dev/ {print $2}')
for pooldir in $POOL_FILE_DIRS
do
OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
done
if [ ! -z "$ZVOLPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ZVOLPOOL on $1"
fi
if [ ! -z "$FILEPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $FILEPOOL on $1"
fi
if [ ! -z "$ALTMOUNTPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
fi
done
if [ -z "${DONT_DESTROY}" ]
then
return 0
else
log_note "Warning: it is not safe to destroy $1!"
return 1
fi
}
#
# Verify zfs operation with -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
typeset ops=$1
typeset datatype=$2
typeset dataset=$3
typeset newdataset=$4
if [[ $datatype != "fs" && $datatype != "vol" ]]; then
log_fail "$datatype is not supported."
fi
# check parameters accordingly
case $ops in
create)
newdataset=$dataset
dataset=""
if [[ $datatype == "vol" ]]; then
ops="create -V $VOLSIZE"
fi
;;
clone)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
log_must snapexists $dataset
;;
rename)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
;;
*)
log_fail "$ops is not supported."
;;
esac
# make sure the upper level filesystem does not exist
destroy_dataset "${newdataset%/*}" "-rRf"
# without -p option, operation will fail
log_mustnot zfs $ops $dataset $newdataset
log_mustnot datasetexists $newdataset ${newdataset%/*}
# with -p option, operation should succeed
log_must zfs $ops -p $dataset $newdataset
block_device_wait
if ! datasetexists $newdataset ; then
log_fail "-p option does not work for $ops"
fi
# when $ops is create or clone, redoing the operation should still return zero
if [[ $ops != "rename" ]]; then
log_must zfs $ops -p $dataset $newdataset
fi
return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
typeset pool=$1
typeset config=$2
if ! poolexists "$pool" ; then
return 1
fi
if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
zdb -e $pool
else
zdb -C $pool
fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, $2); sub(/'\''$/, $2); print $2}'
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
typeset cnt=$1
shift
typeset str="$@"
typeset -i ind
((ind = RANDOM % cnt + 1))
echo "$str" | cut -f $ind -d ' '
}
#
# Randomly select one item from the arguments; the result may also be the NONE (empty) string
#
function random_get_with_non
{
typeset -i cnt=$#
((cnt += 1))
_random_get "$cnt" "$@"
}
#
# Randomly select one item from the arguments; the NONE string is never returned
#
function random_get
{
_random_get "$#" "$@"
}
#
# The function will generate a dataset name of the specified length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
typeset -i len=$1
typeset basestr="$2"
typeset -i baselen=${#basestr}
typeset -i iter=0
typeset l_name=""
if ((len % baselen == 0)); then
((iter = len / baselen))
else
((iter = len / baselen + 1))
fi
while ((iter > 0)); do
l_name="${l_name}$basestr"
((iter -= 1))
done
echo $l_name
}
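#
# Example usage (illustrative): build a long (200 character) name and create
# a dataset with it.
#
#	long_name=$(gen_dataset_name 200 "abcdefgh")
#	log_must zfs create $TESTPOOL/$long_name
#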
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
typeset cksum
sync
sync_all_pools
zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset field=${3:-$pool}
zpool status -v "$pool" 2>/dev/null | \
awk -v device=$disk -v pool=$pool -v field=$field \
'BEGIN {startconfig=0; startfield=0; }
/config:/ {startconfig=1}
(startconfig==1) && ($1==field) {startfield=1; next;}
(startfield==1) && ($1==device) {print $2; exit;}
(startfield==1) &&
($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
}
#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
typeset rootfs=""
if is_freebsd; then
rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
elif ! is_linux; then
rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootfs"
fi
if datasetexists $rootfs; then
echo $rootfs
else
log_fail "This is not a zfsroot system."
fi
}
#
# get the rootfs's pool name
# return:
# rootpool name
#
function get_rootpool
{
typeset rootfs=$(get_rootfs)
echo ${rootfs%%/*}
}
#
# To verify that the required number of disks is given
#
function verify_disk_count
{
typeset -i min=${2:-1}
typeset -i count=$(echo "$1" | wc -w)
if ((count < min)); then
log_untested "A minimum of $min disks is required to run." \
" You specified $count disk(s)"
fi
}
function ds_is_volume
{
typeset type=$(get_prop type $1)
[ $type = "volume" ]
}
function ds_is_filesystem
{
typeset type=$(get_prop type $1)
[ $type = "filesystem" ]
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
}
# Return the number of CPUs (cross-platform)
function get_num_cpus
{
if is_linux ; then
grep -c '^processor' /proc/cpuinfo
elif is_freebsd; then
sysctl -n kern.smp.cpus
else
psrinfo | wc -l
fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
[[ $(get_num_cpus) -gt 1 ]]
}
function get_cpu_freq
{
if is_linux; then
lscpu | awk '/CPU MHz/ { print $3 }'
elif is_freebsd; then
sysctl -n hw.clockrate
else
psrinfo -v 0 | awk '/processor operates at/ {print $6}'
fi
}
# Run the given command as the user provided.
function user_run
{
typeset user=$1
shift
log_note "user: $user"
log_note "cmd: $*"
typeset out=$TEST_BASE_DIR/out
typeset err=$TEST_BASE_DIR/err
sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
typeset res=$?
log_note "out: $(<$out)"
log_note "err: $(<$err)"
return $res
}
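#
# Example usage (illustrative; assumes a user previously created with
# add_user):
#
#	log_must user_run zfsuser zfs list $TESTPOOL
#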
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
typeset pool=$1
typeset vdev
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 2
fi
shift
# We could use 'zpool list' to only get the vdevs of the pool but we
# can't reference a mirror/raidz vdev using its ID (e.g. mirror-0),
# therefore we use the 'zpool status' output.
typeset tmpfile=$(mktemp)
zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
for vdev in "$@"; do
grep -wq ${vdev##*/} $tmpfile || return 1
done
rm -f $tmpfile
return 0
}
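#
# Example usage (illustrative; $DISK1 and $DISK2 stand in for real devices):
#
#	log_must zpool create $TESTPOOL mirror $DISK1 $DISK2
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#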
function get_max
{
typeset -l i max=$1
shift
for i in "$@"; do
max=$((max > i ? max : i))
done
echo $max
}
# Write compressible data into a directory
function write_compressible
{
typeset dir=$1
typeset megs=$2
typeset nfiles=${3:-1}
typeset bs=${4:-1024k}
typeset fname=${5:-file}
[[ -d $dir ]] || log_fail "No directory: $dir"
# Under Linux fio is not currently used since its behavior can
# differ significantly across versions. This includes missing
# command line options and cases where the --buffer_compress_*
# options fail to behave as expected.
if is_linux; then
typeset file_bytes=$(to_bytes $megs)
typeset bs_bytes=4096
typeset blocks=$(($file_bytes / $bs_bytes))
for (( i = 0; i < $nfiles; i++ )); do
truncate -s $file_bytes $dir/$fname.$i
# Write every third block to get 66% compression.
for (( j = 0; j < $blocks; j += 3 )); do
dd if=/dev/urandom of=$dir/$fname.$i \
seek=$j bs=$bs_bytes count=1 \
conv=notrunc >/dev/null 2>&1
done
done
else
command -v fio > /dev/null || log_unsupported "fio missing"
log_must eval fio \
--name=job \
--fallocate=0 \
--minimal \
--randrepeat=0 \
--buffer_compress_percentage=66 \
--buffer_compress_chunk=4096 \
--directory="$dir" \
--numjobs="$nfiles" \
--nrfiles="$nfiles" \
--rw=write \
--bs="$bs" \
--filesize="$megs" \
"--filename_format='$fname.\$jobnum' >/dev/null"
fi
}
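#
# Example usage (an illustrative sketch; the size argument is given with a
# unit suffix, as in existing callers):
#
#	write_compressible $TESTDIR 64m 2
#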
function get_objnum
{
typeset pathname=$1
typeset objnum
[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
if is_freebsd; then
objnum=$(stat -f "%i" $pathname)
else
objnum=$(stat -c %i $pathname)
fi
echo $objnum
}
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
typeset pool=${1:-$TESTPOOL}
typeset force=${2:-false}
if [[ $force == true ]]; then
log_must zpool sync -f $pool
else
log_must zpool sync $pool
fi
return 0
}
#
# Sync all pools
#
# $1 boolean to force uberblock (and config including zpool cache file) update
#
function sync_all_pools #<force>
{
typeset force=${1:-false}
if [[ $force == true ]]; then
log_must zpool sync -f
else
log_must zpool sync
fi
return 0
}
#
# Wait for the zpool 'freeing' property to drop to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
log_must sleep 1
done
}
#
# Wait for every device replace operation to complete
#
# $1 pool name
#
function wait_replacing #pool
{
typeset pool=${1:-$TESTPOOL}
while zpool status $pool | grep -qE 'replacing-[0-9]+'; do
log_must sleep 1
done
}
# Wait for a pool to be scrubbed
#
# $1 pool name
# $2 timeout
#
function wait_scrubbed #pool timeout
{
typeset timeout=${2:-300}
typeset pool=${1:-$TESTPOOL}
for (( timer = 0; timer < $timeout; timer++ )); do
is_pool_scrubbed $pool && break;
sleep 1;
done
}
# Back up the zed.rc in our test directory so that we can edit it for our test.
#
# Returns: Backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
zedrc_backup="$(mktemp)"
cp $ZEDLET_DIR/zed.rc $zedrc_backup
echo $zedrc_backup
}
function zed_rc_restore
{
mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
function zed_setup
{
if ! is_linux; then
log_unsupported "No zed on $UNAME"
fi
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
if [[ ! -e $VDEVID_CONF ]]; then
log_must touch $VDEVID_CONF
fi
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
fi
EXTRA_ZEDLETS=$@
# Create a symlink for /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
# Setup minimal ZED configuration. Individual test cases should
# add additional ZEDLETs as needed for their specific test.
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
# Scripts must only be user writable.
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
saved_umask=$(umask)
log_must umask 0022
for i in $EXTRA_ZEDLETS ; do
log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
done
log_must umask $saved_umask
fi
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
if ! is_linux; then
return
fi
for extra_zedlet; do
log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
done
log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
$ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
$ZEDLET_DIR
}
#
# Check if ZED is currently running; if so, returns PIDs
#
function zed_check
{
if ! is_linux; then
return
fi
zedpids="$(pgrep -x zed)"
zedpids2="$(pgrep -x lt-zed)"
echo ${zedpids} ${zedpids2}
}
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
if ! is_linux; then
return
fi
# ZEDLET_DIR=/var/tmp/zed
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
# Verify the ZED is not already running.
zedpids=$(zed_check)
if [ -n "$zedpids" ]; then
# We never, ever, really want it to just keep going if zed
# is already running - usually this implies our test cases
# will break very strangely because whatever we wanted to
# configure zed for won't be listening to our changes in the
# tmpdir
log_fail "ZED already running - ${zedpids}"
else
log_note "Starting ZED"
# run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
"-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
fi
return 0
}
#
# Kill ZED process
#
function zed_stop
{
if ! is_linux; then
return ""
fi
log_note "Stopping ZED"
while true; do
zedpids=$(zed_check)
[ ! -n "$zedpids" ] && break
log_must kill $zedpids
sleep 1
done
return 0
}
#
# Drain all zevents
#
function zed_events_drain
{
while [ $(zpool events -H | wc -l) -ne 0 ]; do
sleep 1
zpool events -c >/dev/null
done
}
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable
# $2 value
function zed_rc_set
{
var="$1"
val="$2"
# Remove the line
cmd="'/$var/d'"
eval sed -i $cmd $ZEDLET_DIR/zed.rc
# Add it at the end
echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
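#
# Example usage (an illustrative sketch; ZED_NOTIFY_INTERVAL_SECS is just an
# example zed.rc variable):
#
#	zedrc_backup=$(zed_rc_backup)
#	zed_rc_set ZED_NOTIFY_INTERVAL_SECS 3
#	# ... exercise the ZED ...
#	zed_rc_restore $zedrc_backup
#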
#
# Check if the provided device is actively being used as a swap device.
#
function is_swap_inuse
{
typeset device=$1
if [[ -z $device ]] ; then
log_note "No device specified."
return 1
fi
case "$UNAME" in
Linux)
swapon -s | grep -wq $(readlink -f $device)
;;
FreeBSD)
swapctl -l | grep -wq $device
;;
*)
swap -l | grep -wq $device
;;
esac
}
#
# Setup a swap device using the provided device.
#
function swap_setup
{
typeset swapdev=$1
case "$UNAME" in
Linux)
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
;;
FreeBSD)
log_must swapctl -a $swapdev
;;
*)
log_must swap -a $swapdev
;;
esac
return 0
}
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
typeset swapdev=$1
if is_swap_inuse $swapdev; then
if is_linux; then
log_must swapoff $swapdev
elif is_freebsd; then
log_must swapoff $swapdev
else
log_must swap -d $swapdev
fi
fi
return 0
}
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
#
function set_tunable64
{
set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
#
function set_tunable32
{
set_tunable_impl "$1" "$2" W
}
function set_tunable_impl
{
typeset name="$1"
typeset value="$2"
typeset mdb_cmd="$3"
eval "typeset tunable=\$$name"
case "$tunable" in
UNSUPPORTED)
log_unsupported "Tunable '$name' is unsupported on $UNAME"
;;
"")
log_fail "Tunable '$name' must be added to tunables.cfg"
;;
*)
;;
esac
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
case "$UNAME" in
Linux)
typeset zfs_tunables="/sys/module/zfs/parameters"
echo "$value" >"$zfs_tunables/$tunable"
;;
FreeBSD)
sysctl vfs.zfs.$tunable=$value
;;
SunOS)
echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
;;
esac
}
#
# Get a global system tunable
#
# $1 tunable name (use a NAME defined in tunables.cfg)
#
function get_tunable
{
get_tunable_impl "$1"
}
function get_tunable_impl
{
typeset name="$1"
typeset module="${2:-zfs}"
typeset check_only="$3"
eval "typeset tunable=\$$name"
case "$tunable" in
UNSUPPORTED)
if [ -z "$check_only" ] ; then
log_unsupported "Tunable '$name' is unsupported on $UNAME"
else
return 1
fi
;;
"")
if [ -z "$check_only" ] ; then
log_fail "Tunable '$name' must be added to tunables.cfg"
else
return 1
fi
;;
*)
;;
esac
case "$UNAME" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
cat $zfs_tunables/$tunable
;;
FreeBSD)
sysctl -n vfs.zfs.$tunable
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
;;
esac
}
# Does a tunable exist?
#
# $1: Tunable name
function tunable_exists
{
get_tunable_impl $1 "zfs" 1
}
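#
# Example usage (illustrative; TXG_TIMEOUT is one of the NAMEs defined in
# tunables.cfg):
#
#	typeset saved_timeout=$(get_tunable TXG_TIMEOUT)
#	log_must set_tunable32 TXG_TIMEOUT 1
#	# ... run the test ...
#	log_must set_tunable32 TXG_TIMEOUT $saved_timeout
#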
#
# Compute MD5 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function md5digest
{
typeset file=$1
case "$UNAME" in
FreeBSD)
md5 -q $file
;;
*)
typeset sum _
read -r sum _ < <(md5sum -b $file)
echo $sum
;;
esac
}
#
# Compute SHA256 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function sha256digest
{
typeset file=$1
case "$UNAME" in
FreeBSD)
sha256 -q $file
;;
*)
typeset sum _
read -r sum _ < <(sha256sum -b $file)
echo $sum
;;
esac
}
function new_fs #<args>
{
case "$UNAME" in
FreeBSD)
newfs "$@"
;;
*)
echo y | newfs -v "$@"
;;
esac
}
function stat_size #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %z "$path"
;;
*)
stat -c %s "$path"
;;
esac
}
function stat_mtime #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %m "$path"
;;
*)
stat -c %Y "$path"
;;
esac
}
function stat_ctime #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %c "$path"
;;
*)
stat -c %Z "$path"
;;
esac
}
function stat_crtime #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %B "$path"
;;
*)
stat -c %W "$path"
;;
esac
}
function stat_generation #<path>
{
typeset path=$1
case "$UNAME" in
Linux)
getversion "${path}"
;;
*)
stat -f %v "${path}"
;;
esac
}
# Run a command as if it was being run in a TTY.
#
# Usage:
#
# faketty command
#
function faketty
{
if is_freebsd; then
script -q /dev/null env "$@"
else
script --return --quiet -c "$*" /dev/null
fi
}
#
# Produce a random permutation of the integers in a given range (inclusive).
#
function range_shuffle # begin end
{
typeset -i begin=$1
typeset -i end=$2
seq ${begin} ${end} | sort -R
}
#
# Cross-platform xattr helpers
#
function get_xattr # name path
{
typeset name=$1
typeset path=$2
case "$UNAME" in
FreeBSD)
getextattr -qq user "${name}" "${path}"
;;
*)
attr -qg "${name}" "${path}"
;;
esac
}
function set_xattr # name value path
{
typeset name=$1
typeset value=$2
typeset path=$3
case "$UNAME" in
FreeBSD)
setextattr user "${name}" "${value}" "${path}"
;;
*)
attr -qs "${name}" -V "${value}" "${path}"
;;
esac
}
function set_xattr_stdin # name value
{
typeset name=$1
typeset path=$2
case "$UNAME" in
FreeBSD)
setextattr -i user "${name}" "${path}"
;;
*)
attr -qs "${name}" "${path}"
;;
esac
}
function rm_xattr # name path
{
typeset name=$1
typeset path=$2
case "$UNAME" in
FreeBSD)
rmextattr -q user "${name}" "${path}"
;;
*)
attr -qr "${name}" "${path}"
;;
esac
}
function ls_xattr # path
{
typeset path=$1
case "$UNAME" in
FreeBSD)
lsextattr -qq user "${path}"
;;
*)
attr -ql "${path}"
;;
esac
}
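#
# Example usage (illustrative; the attribute name and value are arbitrary):
#
#	log_must set_xattr testattr testvalue $TESTDIR/testfile
#	[ "$(get_xattr testattr $TESTDIR/testfile)" = "testvalue" ] || \
#	    log_fail "unexpected xattr value"
#	log_must rm_xattr testattr $TESTDIR/testfile
#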
function kstat # stat flags?
{
typeset stat=$1
typeset flags=${2-"-n"}
case "$UNAME" in
FreeBSD)
sysctl $flags kstat.zfs.misc.$stat
;;
Linux)
cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null
;;
*)
false
;;
esac
}
function get_arcstat # stat
{
typeset stat=$1
case "$UNAME" in
FreeBSD)
kstat arcstats.$stat
;;
Linux)
kstat arcstats | awk "/$stat/"' { print $3 }'
;;
*)
false
;;
esac
}
function punch_hole # offset length file
{
typeset offset=$1
typeset length=$2
typeset file=$3
case "$UNAME" in
FreeBSD)
truncate -d -o $offset -l $length "$file"
;;
Linux)
fallocate --punch-hole --offset $offset --length $length "$file"
;;
*)
false
;;
esac
}
function zero_range # offset length file
{
typeset offset=$1
typeset length=$2
typeset file=$3
case "$UNAME" in
Linux)
fallocate --zero-range --offset $offset --length $length "$file"
;;
*)
false
;;
esac
}
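#
# Example usage (illustrative): punch a 1M hole at the start of a test file.
#
#	log_must punch_hole 0 $((1024 * 1024)) $TESTDIR/testfile
#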
#
# Wait for the specified arcstat to reach non-zero quiescence.
# If echo is 1 echo the value after reaching quiescence, otherwise
# if echo is 0 print the arcstat we are waiting on.
#
function arcstat_quiescence # stat echo
{
typeset stat=$1
typeset echo=$2
typeset do_once=true
if [[ $echo -eq 0 ]]; then
echo "Waiting for arcstat $1 quiescence."
fi
while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
typeset stat1=$(get_arcstat $stat)
sleep 2
typeset stat2=$(get_arcstat $stat)
do_once=false
done
if [[ $echo -eq 1 ]]; then
echo $stat2
fi
}
function arcstat_quiescence_noecho # stat
{
typeset stat=$1
arcstat_quiescence $stat 0
}
function arcstat_quiescence_echo # stat
{
typeset stat=$1
arcstat_quiescence $stat 1
}
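#
# Example usage (illustrative; l2_size is just an example arcstat name):
#
#	arcstat_quiescence_noecho l2_size
#	typeset l2size=$(arcstat_quiescence_echo l2_size)
#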
#
# Given an array of pids, wait until all processes
# have completed and check their return status.
#
function wait_for_children #children
{
rv=0
children=("$@")
for child in "${children[@]}"
do
child_exit=0
wait ${child} || child_exit=$?
if [ $child_exit -ne 0 ]; then
echo "child ${child} failed with ${child_exit}"
rv=1
fi
done
return $rv
}
#
# Compare two directory trees recursively in a manner similar to diff(1), but
# using rsync. If there are any discrepancies, a summary of the differences are
# output and a non-zero error is returned.
#
# If you're comparing a directory after a ZIL replay, you should set
# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
# information).
#
function directory_diff # dir_a dir_b
{
dir_a="$1"
dir_b="$2"
zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
# If one of the directories doesn't exist, return 2. This is to match the
# semantics of diff.
if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
return 2
fi
# Run rsync with --dry-run --itemize-changes to get something akin to diff
# output, but rsync is far more thorough in detecting differences (diff
# doesn't compare file metadata, and cannot handle special files).
#
# Also make sure to filter out non-user.* xattrs when comparing. On
# SELinux-enabled systems the copied tree will probably have different
# SELinux labels.
args=("-nicaAHX" '--filter=-x! user.*' "--delete")
# NOTE: Quite a few rsync builds do not support --crtimes which would be
# necessary to verify that creation times are being maintained properly.
# Unfortunately because of this we cannot use it unconditionally but we can
# check if this rsync build supports it and use it then. This check is
# based on the same check in the rsync test suite (testsuite/crtimes.test).
#
# We check ctimes even with zil_replay=1 because the ZIL does store
# creation times and we should make sure they match (if the creation times
# do not match there is a "c" entry in one of the columns).
if rsync --version | grep -q "[, ] crtimes"; then
args+=("--crtimes")
else
log_note "This rsync package does not support --crtimes (-N)."
fi
# If we are testing a ZIL replay, we need to ignore timestamp changes.
# Unfortunately --no-times doesn't do what we want -- it will still tell
# you if the timestamps don't match but rsync will set the timestamps to
# the current time (leading to an itemised change entry). It's simpler to
# just filter out those lines.
if [ "$zil_replay" -eq 0 ]; then
filter=("cat")
else
# Different rsync versions have different numbers of columns. So just
# require that aside from the first two, all other columns must be
# blank (literal ".") or a timestamp field ("[tT]").
filter=("grep" "-v" '^\..[.Tt]\+ ')
fi
diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
rv=0
if [ -n "$diff" ]; then
echo "$diff"
rv=1
fi
return $rv
}
#
# Compare two directory trees recursively, without checking whether the mtimes
# match (creation times will be checked if the available rsync binary supports
# it). This is necessary for ZIL replay checks (because the ZIL does not
# contain mtimes and thus after a ZIL replay, mtimes won't match).
#
# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
#
function replay_directory_diff # dir_a dir_b
{
LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
}
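#
# Example usage (illustrative; $TESTDIR and $TESTDIR_COPY stand in for the two
# trees being compared):
#
#	log_must directory_diff $TESTDIR $TESTDIR_COPY
#	# After a ZIL replay, ignore mtime differences:
#	log_must replay_directory_diff $TESTDIR $TESTDIR_COPY
#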
#
# Put coredumps into $1/core.{basename}
#
# Output must be saved and passed to pop_coredump_pattern on cleanup
#
function push_coredump_pattern # dir
{
ulimit -c unlimited
case "$UNAME" in
Linux)
cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
echo 0 >/proc/sys/kernel/core_uses_pid
;;
FreeBSD)
sysctl -n kern.corefile
sysctl kern.corefile="$1/core.%N" >/dev/null
;;
*)
# Nothing to output - set only for this shell
coreadm -p "$1/core.%f"
;;
esac
}
#
# Put coredumps back into the default location
#
function pop_coredump_pattern
{
[ -s "$1" ] || return 0
case "$UNAME" in
Linux)
typeset pat pid
{ read -r pat; read -r pid; } < "$1"
echo "$pat" >/proc/sys/kernel/core_pattern &&
echo "$pid" >/proc/sys/kernel/core_uses_pid
;;
FreeBSD)
sysctl kern.corefile="$(<"$1")" >/dev/null
;;
esac
}
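#
# Example usage (an illustrative sketch; the saved-pattern file is a temporary
# file chosen by the caller):
#
#	coresave=$(mktemp)
#	push_coredump_pattern "$TEST_BASE_DIR" > "$coresave"
#	# ... run the test that may dump core ...
#	pop_coredump_pattern "$coresave"
#	rm -f "$coresave"
#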
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
index d6a2fe5db7c6..80e7bcb3bd09 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
@@ -1,99 +1,101 @@
# This file exports variables for each tunable used in the test suite.
#
# Different platforms use different names for most tunables. To avoid littering
# the tests with conditional logic for deciding how to set each tunable, the
# logic is instead consolidated to this one file.
#
# Any use of tunables in tests must use a name defined here. New entries
# should be added to the table as needed. Please keep the table sorted
# alphabetically for ease of maintenance.
#
# Platform-specific tunables should still use a NAME from this table for
# consistency. Enter UNSUPPORTED in the column for platforms on which the
# tunable is not implemented.
UNAME=$(uname)
# NAME FreeBSD tunable Linux tunable
cat <<%%%% |
ADMIN_SNAPSHOT UNSUPPORTED zfs_admin_snapshot
ALLOW_REDACTED_DATASET_MOUNT allow_redacted_dataset_mount zfs_allow_redacted_dataset_mount
ARC_MAX arc.max zfs_arc_max
ARC_MIN arc.min zfs_arc_min
ASYNC_BLOCK_MAX_BLOCKS async_block_max_blocks zfs_async_block_max_blocks
CHECKSUM_EVENTS_PER_SECOND checksum_events_per_second zfs_checksum_events_per_second
COMMIT_TIMEOUT_PCT commit_timeout_pct zfs_commit_timeout_pct
COMPRESSED_ARC_ENABLED compressed_arc_enabled zfs_compressed_arc_enabled
CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS condense.indirect_commit_entry_delay_ms zfs_condense_indirect_commit_entry_delay_ms
CONDENSE_INDIRECT_OBSOLETE_PCT condense.indirect_obsolete_pct zfs_condense_indirect_obsolete_pct
CONDENSE_MIN_MAPPING_BYTES condense.min_mapping_bytes zfs_condense_min_mapping_bytes
DBUF_CACHE_SHIFT dbuf.cache_shift dbuf_cache_shift
DEADMAN_CHECKTIME_MS deadman.checktime_ms zfs_deadman_checktime_ms
DEADMAN_FAILMODE deadman.failmode zfs_deadman_failmode
DEADMAN_SYNCTIME_MS deadman.synctime_ms zfs_deadman_synctime_ms
DEADMAN_ZIOTIME_MS deadman.ziotime_ms zfs_deadman_ziotime_ms
DISABLE_IVSET_GUID_CHECK disable_ivset_guid_check zfs_disable_ivset_guid_check
DMU_OFFSET_NEXT_SYNC dmu_offset_next_sync zfs_dmu_offset_next_sync
INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size
INITIALIZE_VALUE initialize_value zfs_initialize_value
KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export
LUA_MAX_MEMLIMIT lua.max_memlimit zfs_lua_max_memlimit
L2ARC_MFUONLY l2arc.mfuonly l2arc_mfuonly
L2ARC_NOPREFETCH l2arc.noprefetch l2arc_noprefetch
L2ARC_REBUILD_BLOCKS_MIN_L2SIZE l2arc.rebuild_blocks_min_l2size l2arc_rebuild_blocks_min_l2size
L2ARC_REBUILD_ENABLED l2arc.rebuild_enabled l2arc_rebuild_enabled
L2ARC_TRIM_AHEAD l2arc.trim_ahead l2arc_trim_ahead
L2ARC_WRITE_BOOST l2arc.write_boost l2arc_write_boost
L2ARC_WRITE_MAX l2arc.write_max l2arc_write_max
LIVELIST_CONDENSE_NEW_ALLOC livelist.condense.new_alloc zfs_livelist_condense_new_alloc
LIVELIST_CONDENSE_SYNC_CANCEL livelist.condense.sync_cancel zfs_livelist_condense_sync_cancel
LIVELIST_CONDENSE_SYNC_PAUSE livelist.condense.sync_pause zfs_livelist_condense_sync_pause
LIVELIST_CONDENSE_ZTHR_CANCEL livelist.condense.zthr_cancel zfs_livelist_condense_zthr_cancel
LIVELIST_CONDENSE_ZTHR_PAUSE livelist.condense.zthr_pause zfs_livelist_condense_zthr_pause
LIVELIST_MAX_ENTRIES livelist.max_entries zfs_livelist_max_entries
LIVELIST_MIN_PERCENT_SHARED livelist.min_percent_shared zfs_livelist_min_percent_shared
MAX_DATASET_NESTING max_dataset_nesting zfs_max_dataset_nesting
MAX_MISSING_TVDS max_missing_tvds zfs_max_missing_tvds
METASLAB_DEBUG_LOAD metaslab.debug_load metaslab_debug_load
METASLAB_FORCE_GANGING metaslab.force_ganging metaslab_force_ganging
MULTIHOST_FAIL_INTERVALS multihost.fail_intervals zfs_multihost_fail_intervals
MULTIHOST_HISTORY multihost.history zfs_multihost_history
MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_intervals
MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval
OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable
REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled
REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment
RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms
SCAN_LEGACY scan_legacy zfs_scan_legacy
SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress
SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit
SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time
SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second
SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation
SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit
SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data
SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata
TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min
TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip
TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch
TXG_HISTORY txg.history zfs_txg_history
TXG_TIMEOUT txg.timeout zfs_txg_timeout
UNLINK_SUSPEND_PROGRESS UNSUPPORTED zfs_unlink_suspend_progress
+VDEV_FILE_LOGICAL_ASHIFT vdev.file.logical_ashift vdev_file_logical_ashift
VDEV_FILE_PHYSICAL_ASHIFT vdev.file.physical_ashift vdev_file_physical_ashift
+VDEV_MAX_AUTO_ASHIFT vdev.max_auto_ashift zfs_vdev_max_auto_ashift
VDEV_MIN_MS_COUNT vdev.min_ms_count zfs_vdev_min_ms_count
VDEV_VALIDATE_SKIP vdev.validate_skip vdev_validate_skip
VOL_INHIBIT_DEV UNSUPPORTED zvol_inhibit_dev
VOL_MODE vol.mode zvol_volmode
VOL_RECURSIVE vol.recursive UNSUPPORTED
VOL_USE_BLK_MQ UNSUPPORTED zvol_use_blk_mq
XATTR_COMPAT xattr_compat zfs_xattr_compat
ZEVENT_LEN_MAX zevent.len_max zfs_zevent_len_max
ZEVENT_RETAIN_MAX zevent.retain_max zfs_zevent_retain_max
ZIO_SLOW_IO_MS zio.slow_io_ms zio_slow_io_ms
ZIL_SAXATTR zil_saxattr zfs_zil_saxattr
%%%%
while read name FreeBSD Linux; do
eval "export ${name}=\$${UNAME}"
done
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index 4a815db8a6d6..d53316643bc5 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -1,1994 +1,2000 @@
CLEANFILES =
dist_noinst_DATA =
include $(top_srcdir)/config/Substfiles.am
datadir_zfs_tests_testsdir = $(datadir)/$(PACKAGE)/zfs-tests/tests
nobase_dist_datadir_zfs_tests_tests_DATA = \
perf/nfs-sample.cfg \
perf/perf.shlib \
\
perf/fio/mkfiles.fio \
perf/fio/random_reads.fio \
perf/fio/random_readwrite.fio \
perf/fio/random_readwrite_fixed.fio \
perf/fio/random_writes.fio \
perf/fio/sequential_reads.fio \
perf/fio/sequential_readwrite.fio \
perf/fio/sequential_writes.fio
nobase_dist_datadir_zfs_tests_tests_SCRIPTS = \
perf/regression/random_reads.ksh \
perf/regression/random_readwrite.ksh \
perf/regression/random_readwrite_fixed.ksh \
perf/regression/random_writes.ksh \
perf/regression/random_writes_zil.ksh \
perf/regression/sequential_reads_arc_cached_clone.ksh \
perf/regression/sequential_reads_arc_cached.ksh \
perf/regression/sequential_reads_dbuf_cached.ksh \
perf/regression/sequential_reads.ksh \
perf/regression/sequential_writes.ksh \
perf/regression/setup.ksh \
\
perf/scripts/prefetch_io.sh
# These lists can be regenerated by running make regen-tests at the root, or, on a *clean* source:
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
#
# simd and tmpfile are Linux-only and not installed elsewhere
#
# C programs are specced in ../Makefile.am above as part of the main Makefile
find_common := find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po'
regen:
@$(MAKE) -C $(top_builddir) clean
@$(MAKE) clean
$(SED) $(ac_inplace) '/^# -- >8 --/q' Makefile.am
echo >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_DATA = \' >> Makefile.am
$(find_common) ! -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \' >> Makefile.am
$(find_common) -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'SUBSTFILES += $$(nobase_nodist_datadir_zfs_tests_tests_DATA) $$(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)' >> Makefile.am
echo >> Makefile.am
echo 'if BUILD_LINUX' >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'endif' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_DATA += \' >> Makefile.am
$(find_common) ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
# -- >8 --
nobase_nodist_datadir_zfs_tests_tests_DATA = \
functional/pam/utilities.kshlib
nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \
functional/pyzfs/pyzfs_unittest.ksh
SUBSTFILES += $(nobase_nodist_datadir_zfs_tests_tests_DATA) $(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)
if BUILD_LINUX
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/simd/simd_supported.ksh \
functional/tmpfile/cleanup.ksh \
functional/tmpfile/setup.ksh
endif
nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/acl/acl.cfg \
functional/acl/acl_common.kshlib \
functional/alloc_class/alloc_class.cfg \
functional/alloc_class/alloc_class.kshlib \
functional/atime/atime.cfg \
functional/atime/atime_common.kshlib \
functional/cache/cache.cfg \
functional/cache/cache.kshlib \
functional/cachefile/cachefile.cfg \
functional/cachefile/cachefile.kshlib \
functional/casenorm/casenorm.cfg \
functional/casenorm/casenorm.kshlib \
functional/channel_program/channel_common.kshlib \
functional/channel_program/lua_core/tst.args_to_lua.out \
functional/channel_program/lua_core/tst.args_to_lua.zcp \
functional/channel_program/lua_core/tst.divide_by_zero.err \
functional/channel_program/lua_core/tst.divide_by_zero.zcp \
functional/channel_program/lua_core/tst.exists.zcp \
functional/channel_program/lua_core/tst.large_prog.out \
functional/channel_program/lua_core/tst.large_prog.zcp \
functional/channel_program/lua_core/tst.lib_base.lua \
functional/channel_program/lua_core/tst.lib_coroutine.lua \
functional/channel_program/lua_core/tst.lib_strings.lua \
functional/channel_program/lua_core/tst.lib_table.lua \
functional/channel_program/lua_core/tst.nested_neg.zcp \
functional/channel_program/lua_core/tst.nested_pos.zcp \
functional/channel_program/lua_core/tst.recursive.zcp \
functional/channel_program/lua_core/tst.return_large.zcp \
functional/channel_program/lua_core/tst.return_recursive_table.zcp \
functional/channel_program/lua_core/tst.stack_gsub.err \
functional/channel_program/lua_core/tst.stack_gsub.zcp \
functional/channel_program/lua_core/tst.timeout.zcp \
functional/channel_program/synctask_core/tst.bookmark.copy.zcp \
functional/channel_program/synctask_core/tst.bookmark.create.zcp \
functional/channel_program/synctask_core/tst.get_index_props.out \
functional/channel_program/synctask_core/tst.get_index_props.zcp \
functional/channel_program/synctask_core/tst.get_number_props.out \
functional/channel_program/synctask_core/tst.get_number_props.zcp \
functional/channel_program/synctask_core/tst.get_string_props.out \
functional/channel_program/synctask_core/tst.get_string_props.zcp \
functional/channel_program/synctask_core/tst.promote_conflict.zcp \
functional/channel_program/synctask_core/tst.set_props.zcp \
functional/channel_program/synctask_core/tst.snapshot_destroy.zcp \
functional/channel_program/synctask_core/tst.snapshot_neg.zcp \
functional/channel_program/synctask_core/tst.snapshot_recursive.zcp \
+ functional/channel_program/synctask_core/tst.snapshot_rename.zcp \
functional/channel_program/synctask_core/tst.snapshot_simple.zcp \
functional/checksum/default.cfg \
functional/clean_mirror/clean_mirror_common.kshlib \
functional/clean_mirror/default.cfg \
functional/cli_root/cli_common.kshlib \
functional/cli_root/zfs_copies/zfs_copies.cfg \
functional/cli_root/zfs_copies/zfs_copies.kshlib \
functional/cli_root/zfs_create/properties.kshlib \
functional/cli_root/zfs_create/zfs_create.cfg \
functional/cli_root/zfs_create/zfs_create_common.kshlib \
functional/cli_root/zfs_destroy/zfs_destroy.cfg \
functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib \
functional/cli_root/zfs_get/zfs_get_common.kshlib \
functional/cli_root/zfs_get/zfs_get_list_d.kshlib \
functional/cli_root/zfs_jail/jail.conf \
functional/cli_root/zfs_load-key/HEXKEY \
functional/cli_root/zfs_load-key/PASSPHRASE \
functional/cli_root/zfs_load-key/RAWKEY \
functional/cli_root/zfs_load-key/zfs_load-key.cfg \
functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib \
functional/cli_root/zfs_mount/zfs_mount.cfg \
functional/cli_root/zfs_mount/zfs_mount.kshlib \
functional/cli_root/zfs_promote/zfs_promote.cfg \
functional/cli_root/zfs_receive/zstd_test_data.txt \
functional/cli_root/zfs_rename/zfs_rename.cfg \
functional/cli_root/zfs_rename/zfs_rename.kshlib \
functional/cli_root/zfs_rollback/zfs_rollback.cfg \
functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib \
functional/cli_root/zfs_send/zfs_send.cfg \
functional/cli_root/zfs_set/zfs_set_common.kshlib \
functional/cli_root/zfs_share/zfs_share.cfg \
functional/cli_root/zfs_snapshot/zfs_snapshot.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.kshlib \
functional/cli_root/zfs_upgrade/zfs_upgrade.kshlib \
functional/cli_root/zfs_wait/zfs_wait.kshlib \
functional/cli_root/zpool_add/zpool_add.cfg \
functional/cli_root/zpool_add/zpool_add.kshlib \
functional/cli_root/zpool_clear/zpool_clear.cfg \
functional/cli_root/zpool_create/draidcfg.gz \
functional/cli_root/zpool_create/zpool_create.cfg \
functional/cli_root/zpool_create/zpool_create.shlib \
functional/cli_root/zpool_destroy/zpool_destroy.cfg \
functional/cli_root/zpool_events/zpool_events.cfg \
functional/cli_root/zpool_events/zpool_events.kshlib \
functional/cli_root/zpool_expand/zpool_expand.cfg \
functional/cli_root/zpool_export/zpool_export.cfg \
functional/cli_root/zpool_export/zpool_export.kshlib \
functional/cli_root/zpool_get/zpool_get.cfg \
functional/cli_root/zpool_get/zpool_get_parsable.cfg \
functional/cli_root/zpool_import/blockfiles/cryptv0.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/missing_ivset.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.shlib \
functional/cli_root/zpool_resilver/zpool_resilver.cfg \
functional/cli_root/zpool_scrub/zpool_scrub.cfg \
functional/cli_root/zpool_split/zpool_split.cfg \
functional/cli_root/zpool_trim/zpool_trim.kshlib \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v10.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v11.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v12.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v13.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v14.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v15.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz21.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz22.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz23.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v4.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v5.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v6.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v7.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v8.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v999.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v9.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-vBROKEN.dat.bz2 \
functional/cli_root/zpool_upgrade/zpool_upgrade.cfg \
functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib \
functional/cli_root/zpool_wait/zpool_wait.kshlib \
functional/cli_user/misc/misc.cfg \
functional/cli_user/zfs_list/zfs_list.cfg \
functional/cli_user/zfs_list/zfs_list.kshlib \
functional/compression/compress.cfg \
functional/compression/testpool_zstd.tar.gz \
functional/deadman/deadman.cfg \
functional/delegate/delegate.cfg \
functional/delegate/delegate_common.kshlib \
functional/devices/devices.cfg \
functional/devices/devices_common.kshlib \
functional/events/events.cfg \
functional/events/events_common.kshlib \
functional/fault/fault.cfg \
functional/grow/grow.cfg \
functional/history/history.cfg \
functional/history/history_common.kshlib \
functional/history/i386.migratedpool.DAT.Z \
functional/history/i386.orig_history.txt \
functional/history/sparc.migratedpool.DAT.Z \
functional/history/sparc.orig_history.txt \
functional/history/zfs-pool-v4.dat.Z \
functional/inheritance/config001.cfg \
functional/inheritance/config002.cfg \
functional/inheritance/config003.cfg \
functional/inheritance/config004.cfg \
functional/inheritance/config005.cfg \
functional/inheritance/config006.cfg \
functional/inheritance/config007.cfg \
functional/inheritance/config008.cfg \
functional/inheritance/config009.cfg \
functional/inheritance/config010.cfg \
functional/inheritance/config011.cfg \
functional/inheritance/config012.cfg \
functional/inheritance/config013.cfg \
functional/inheritance/config014.cfg \
functional/inheritance/config015.cfg \
functional/inheritance/config016.cfg \
functional/inheritance/config017.cfg \
functional/inheritance/config018.cfg \
functional/inheritance/config019.cfg \
functional/inheritance/config020.cfg \
functional/inheritance/config021.cfg \
functional/inheritance/config022.cfg \
functional/inheritance/config023.cfg \
functional/inheritance/config024.cfg \
functional/inheritance/inherit.kshlib \
functional/inheritance/README.config \
functional/inheritance/README.state \
functional/inheritance/state001.cfg \
functional/inheritance/state002.cfg \
functional/inheritance/state003.cfg \
functional/inheritance/state004.cfg \
functional/inheritance/state005.cfg \
functional/inheritance/state006.cfg \
functional/inheritance/state007.cfg \
functional/inheritance/state008.cfg \
functional/inheritance/state009.cfg \
functional/inheritance/state010.cfg \
functional/inheritance/state011.cfg \
functional/inheritance/state012.cfg \
functional/inheritance/state013.cfg \
functional/inheritance/state014.cfg \
functional/inheritance/state015.cfg \
functional/inheritance/state016.cfg \
functional/inheritance/state017.cfg \
functional/inheritance/state018.cfg \
functional/inheritance/state019.cfg \
functional/inheritance/state020.cfg \
functional/inheritance/state021.cfg \
functional/inheritance/state022.cfg \
functional/inheritance/state023.cfg \
functional/inheritance/state024.cfg \
functional/inuse/inuse.cfg \
functional/io/io.cfg \
functional/l2arc/l2arc.cfg \
functional/largest_pool/largest_pool.cfg \
functional/migration/migration.cfg \
functional/migration/migration.kshlib \
functional/mmap/mmap.cfg \
functional/mmp/mmp.cfg \
functional/mmp/mmp.kshlib \
functional/mv_files/mv_files.cfg \
functional/mv_files/mv_files_common.kshlib \
functional/nopwrite/nopwrite.shlib \
functional/no_space/enospc.cfg \
functional/online_offline/online_offline.cfg \
functional/pool_checkpoint/pool_checkpoint.kshlib \
functional/projectquota/projectquota.cfg \
functional/projectquota/projectquota_common.kshlib \
functional/quota/quota.cfg \
functional/quota/quota.kshlib \
functional/redacted_send/redacted.cfg \
functional/redacted_send/redacted.kshlib \
functional/redundancy/redundancy.cfg \
functional/redundancy/redundancy.kshlib \
functional/refreserv/refreserv.cfg \
functional/removal/removal.kshlib \
functional/replacement/replacement.cfg \
functional/reservation/reservation.cfg \
functional/reservation/reservation.shlib \
functional/rsend/dedup_encrypted_zvol.bz2 \
functional/rsend/dedup_encrypted_zvol.zsend.bz2 \
functional/rsend/dedup.zsend.bz2 \
functional/rsend/fs.tar.gz \
functional/rsend/rsend.cfg \
functional/rsend/rsend.kshlib \
functional/scrub_mirror/default.cfg \
functional/scrub_mirror/scrub_mirror_common.kshlib \
functional/slog/slog.cfg \
functional/slog/slog.kshlib \
functional/snapshot/snapshot.cfg \
functional/snapused/snapused.kshlib \
functional/sparse/sparse.cfg \
functional/trim/trim.cfg \
functional/trim/trim.kshlib \
functional/truncate/truncate.cfg \
functional/upgrade/upgrade_common.kshlib \
functional/user_namespace/user_namespace.cfg \
functional/user_namespace/user_namespace_common.kshlib \
functional/userquota/userquota.cfg \
functional/userquota/userquota_common.kshlib \
functional/vdev_zaps/vdev_zaps.kshlib \
functional/xattr/xattr.cfg \
functional/xattr/xattr_common.kshlib \
functional/zvol/zvol.cfg \
functional/zvol/zvol_cli/zvol_cli.cfg \
functional/zvol/zvol_common.shlib \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC.cfg \
functional/zvol/zvol_misc/zvol_misc_common.kshlib \
functional/zvol/zvol_swap/zvol_swap.cfg
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/acl/off/cleanup.ksh \
functional/acl/off/dosmode.ksh \
functional/acl/off/posixmode.ksh \
functional/acl/off/setup.ksh \
functional/acl/posix/cleanup.ksh \
functional/acl/posix/posix_001_pos.ksh \
functional/acl/posix/posix_002_pos.ksh \
functional/acl/posix/posix_003_pos.ksh \
functional/acl/posix/posix_004_pos.ksh \
functional/acl/posix-sa/cleanup.ksh \
functional/acl/posix-sa/posix_001_pos.ksh \
functional/acl/posix-sa/posix_002_pos.ksh \
functional/acl/posix-sa/posix_003_pos.ksh \
functional/acl/posix-sa/posix_004_pos.ksh \
functional/acl/posix-sa/setup.ksh \
functional/acl/posix/setup.ksh \
functional/alloc_class/alloc_class_001_pos.ksh \
functional/alloc_class/alloc_class_002_neg.ksh \
functional/alloc_class/alloc_class_003_pos.ksh \
functional/alloc_class/alloc_class_004_pos.ksh \
functional/alloc_class/alloc_class_005_pos.ksh \
functional/alloc_class/alloc_class_006_pos.ksh \
functional/alloc_class/alloc_class_007_pos.ksh \
functional/alloc_class/alloc_class_008_pos.ksh \
functional/alloc_class/alloc_class_009_pos.ksh \
functional/alloc_class/alloc_class_010_pos.ksh \
functional/alloc_class/alloc_class_011_neg.ksh \
functional/alloc_class/alloc_class_012_pos.ksh \
functional/alloc_class/alloc_class_013_pos.ksh \
functional/alloc_class/cleanup.ksh \
functional/alloc_class/setup.ksh \
functional/append/file_append.ksh \
functional/append/threadsappend_001_pos.ksh \
functional/append/cleanup.ksh \
functional/append/setup.ksh \
functional/arc/arcstats_runtime_tuning.ksh \
functional/arc/cleanup.ksh \
functional/arc/dbufstats_001_pos.ksh \
functional/arc/dbufstats_002_pos.ksh \
functional/arc/dbufstats_003_pos.ksh \
functional/arc/setup.ksh \
functional/atime/atime_001_pos.ksh \
functional/atime/atime_002_neg.ksh \
functional/atime/atime_003_pos.ksh \
functional/atime/cleanup.ksh \
functional/atime/root_atime_off.ksh \
functional/atime/root_atime_on.ksh \
functional/atime/root_relatime_on.ksh \
functional/atime/setup.ksh \
functional/bootfs/bootfs_001_pos.ksh \
functional/bootfs/bootfs_002_neg.ksh \
functional/bootfs/bootfs_003_pos.ksh \
functional/bootfs/bootfs_004_neg.ksh \
functional/bootfs/bootfs_005_neg.ksh \
functional/bootfs/bootfs_006_pos.ksh \
functional/bootfs/bootfs_007_pos.ksh \
functional/bootfs/bootfs_008_pos.ksh \
functional/bootfs/cleanup.ksh \
functional/bootfs/setup.ksh \
functional/btree/btree_negative.ksh \
functional/btree/btree_positive.ksh \
functional/cache/cache_001_pos.ksh \
functional/cache/cache_002_pos.ksh \
functional/cache/cache_003_pos.ksh \
functional/cache/cache_004_neg.ksh \
functional/cache/cache_005_neg.ksh \
functional/cache/cache_006_pos.ksh \
functional/cache/cache_007_neg.ksh \
functional/cache/cache_008_neg.ksh \
functional/cache/cache_009_pos.ksh \
functional/cache/cache_010_pos.ksh \
functional/cache/cache_011_pos.ksh \
functional/cache/cache_012_pos.ksh \
functional/cache/cleanup.ksh \
functional/cachefile/cachefile_001_pos.ksh \
functional/cachefile/cachefile_002_pos.ksh \
functional/cachefile/cachefile_003_pos.ksh \
functional/cachefile/cachefile_004_pos.ksh \
functional/cachefile/cleanup.ksh \
functional/cachefile/setup.ksh \
functional/cache/setup.ksh \
functional/casenorm/case_all_values.ksh \
functional/casenorm/cleanup.ksh \
functional/casenorm/insensitive_formd_delete.ksh \
functional/casenorm/insensitive_formd_lookup.ksh \
functional/casenorm/insensitive_none_delete.ksh \
functional/casenorm/insensitive_none_lookup.ksh \
functional/casenorm/mixed_create_failure.ksh \
functional/casenorm/mixed_formd_delete.ksh \
functional/casenorm/mixed_formd_lookup_ci.ksh \
functional/casenorm/mixed_formd_lookup.ksh \
functional/casenorm/mixed_none_delete.ksh \
functional/casenorm/mixed_none_lookup_ci.ksh \
functional/casenorm/mixed_none_lookup.ksh \
functional/casenorm/norm_all_values.ksh \
functional/casenorm/sensitive_formd_delete.ksh \
functional/casenorm/sensitive_formd_lookup.ksh \
functional/casenorm/sensitive_none_delete.ksh \
functional/casenorm/sensitive_none_lookup.ksh \
functional/casenorm/setup.ksh \
functional/channel_program/lua_core/cleanup.ksh \
functional/channel_program/lua_core/setup.ksh \
functional/channel_program/lua_core/tst.args_to_lua.ksh \
functional/channel_program/lua_core/tst.divide_by_zero.ksh \
functional/channel_program/lua_core/tst.exists.ksh \
functional/channel_program/lua_core/tst.integer_illegal.ksh \
functional/channel_program/lua_core/tst.integer_overflow.ksh \
functional/channel_program/lua_core/tst.language_functions_neg.ksh \
functional/channel_program/lua_core/tst.language_functions_pos.ksh \
functional/channel_program/lua_core/tst.large_prog.ksh \
functional/channel_program/lua_core/tst.libraries.ksh \
functional/channel_program/lua_core/tst.memory_limit.ksh \
functional/channel_program/lua_core/tst.nested_neg.ksh \
functional/channel_program/lua_core/tst.nested_pos.ksh \
functional/channel_program/lua_core/tst.nvlist_to_lua.ksh \
functional/channel_program/lua_core/tst.recursive_neg.ksh \
functional/channel_program/lua_core/tst.recursive_pos.ksh \
functional/channel_program/lua_core/tst.return_large.ksh \
functional/channel_program/lua_core/tst.return_nvlist_neg.ksh \
functional/channel_program/lua_core/tst.return_nvlist_pos.ksh \
functional/channel_program/lua_core/tst.return_recursive_table.ksh \
functional/channel_program/lua_core/tst.stack_gsub.ksh \
functional/channel_program/lua_core/tst.timeout.ksh \
functional/channel_program/synctask_core/cleanup.ksh \
functional/channel_program/synctask_core/setup.ksh \
functional/channel_program/synctask_core/tst.bookmark.copy.ksh \
functional/channel_program/synctask_core/tst.bookmark.create.ksh \
functional/channel_program/synctask_core/tst.destroy_fs.ksh \
functional/channel_program/synctask_core/tst.destroy_snap.ksh \
functional/channel_program/synctask_core/tst.get_count_and_limit.ksh \
functional/channel_program/synctask_core/tst.get_index_props.ksh \
functional/channel_program/synctask_core/tst.get_mountpoint.ksh \
functional/channel_program/synctask_core/tst.get_neg.ksh \
functional/channel_program/synctask_core/tst.get_number_props.ksh \
functional/channel_program/synctask_core/tst.get_string_props.ksh \
functional/channel_program/synctask_core/tst.get_type.ksh \
functional/channel_program/synctask_core/tst.get_userquota.ksh \
functional/channel_program/synctask_core/tst.get_written.ksh \
functional/channel_program/synctask_core/tst.inherit.ksh \
functional/channel_program/synctask_core/tst.list_bookmarks.ksh \
functional/channel_program/synctask_core/tst.list_children.ksh \
functional/channel_program/synctask_core/tst.list_clones.ksh \
functional/channel_program/synctask_core/tst.list_holds.ksh \
functional/channel_program/synctask_core/tst.list_snapshots.ksh \
functional/channel_program/synctask_core/tst.list_system_props.ksh \
functional/channel_program/synctask_core/tst.list_user_props.ksh \
functional/channel_program/synctask_core/tst.parse_args_neg.ksh \
functional/channel_program/synctask_core/tst.promote_conflict.ksh \
functional/channel_program/synctask_core/tst.promote_multiple.ksh \
functional/channel_program/synctask_core/tst.promote_simple.ksh \
functional/channel_program/synctask_core/tst.rollback_mult.ksh \
functional/channel_program/synctask_core/tst.rollback_one.ksh \
functional/channel_program/synctask_core/tst.set_props.ksh \
functional/channel_program/synctask_core/tst.snapshot_destroy.ksh \
functional/channel_program/synctask_core/tst.snapshot_neg.ksh \
functional/channel_program/synctask_core/tst.snapshot_recursive.ksh \
+ functional/channel_program/synctask_core/tst.snapshot_rename.ksh \
functional/channel_program/synctask_core/tst.snapshot_simple.ksh \
functional/channel_program/synctask_core/tst.terminate_by_signal.ksh \
functional/chattr/chattr_001_pos.ksh \
functional/chattr/chattr_002_neg.ksh \
functional/chattr/cleanup.ksh \
functional/chattr/setup.ksh \
functional/checksum/cleanup.ksh \
functional/checksum/filetest_001_pos.ksh \
functional/checksum/filetest_002_pos.ksh \
functional/checksum/run_blake3_test.ksh \
functional/checksum/run_edonr_test.ksh \
functional/checksum/run_sha2_test.ksh \
functional/checksum/run_skein_test.ksh \
functional/checksum/setup.ksh \
functional/clean_mirror/clean_mirror_001_pos.ksh \
functional/clean_mirror/clean_mirror_002_pos.ksh \
functional/clean_mirror/clean_mirror_003_pos.ksh \
functional/clean_mirror/clean_mirror_004_pos.ksh \
functional/clean_mirror/cleanup.ksh \
functional/clean_mirror/setup.ksh \
functional/cli_root/zdb/zdb_002_pos.ksh \
functional/cli_root/zdb/zdb_003_pos.ksh \
functional/cli_root/zdb/zdb_004_pos.ksh \
functional/cli_root/zdb/zdb_005_pos.ksh \
functional/cli_root/zdb/zdb_006_pos.ksh \
functional/cli_root/zdb/zdb_args_neg.ksh \
functional/cli_root/zdb/zdb_args_pos.ksh \
functional/cli_root/zdb/zdb_block_size_histogram.ksh \
functional/cli_root/zdb/zdb_checksum.ksh \
functional/cli_root/zdb/zdb_decompress.ksh \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
functional/cli_root/zdb/zdb_objset_id.ksh \
functional/cli_root/zdb/zdb_recover_2.ksh \
functional/cli_root/zdb/zdb_recover.ksh \
functional/cli_root/zfs_bookmark/cleanup.ksh \
functional/cli_root/zfs_bookmark/setup.ksh \
functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh \
functional/cli_root/zfs_change-key/cleanup.ksh \
functional/cli_root/zfs_change-key/setup.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_child.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_clones.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_format.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_inherit.ksh \
functional/cli_root/zfs_change-key/zfs_change-key.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_load.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_location.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_pbkdf2iters.ksh \
functional/cli_root/zfs/cleanup.ksh \
functional/cli_root/zfs_clone/cleanup.ksh \
functional/cli_root/zfs_clone/setup.ksh \
functional/cli_root/zfs_clone/zfs_clone_001_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_002_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_003_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_004_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_006_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_007_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_008_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_009_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_deeply_nested.ksh \
functional/cli_root/zfs_clone/zfs_clone_encrypted.ksh \
functional/cli_root/zfs_clone/zfs_clone_rm_nested.ksh \
functional/cli_root/zfs_copies/cleanup.ksh \
functional/cli_root/zfs_copies/setup.ksh \
functional/cli_root/zfs_copies/zfs_copies_001_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_003_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_004_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_006_pos.ksh \
functional/cli_root/zfs_create/cleanup.ksh \
functional/cli_root/zfs_create/setup.ksh \
functional/cli_root/zfs_create/zfs_create_001_pos.ksh \
functional/cli_root/zfs_create/zfs_create_002_pos.ksh \
functional/cli_root/zfs_create/zfs_create_003_pos.ksh \
functional/cli_root/zfs_create/zfs_create_004_pos.ksh \
functional/cli_root/zfs_create/zfs_create_005_pos.ksh \
functional/cli_root/zfs_create/zfs_create_006_pos.ksh \
functional/cli_root/zfs_create/zfs_create_007_pos.ksh \
functional/cli_root/zfs_create/zfs_create_008_neg.ksh \
functional/cli_root/zfs_create/zfs_create_009_neg.ksh \
functional/cli_root/zfs_create/zfs_create_010_neg.ksh \
functional/cli_root/zfs_create/zfs_create_011_pos.ksh \
functional/cli_root/zfs_create/zfs_create_012_pos.ksh \
functional/cli_root/zfs_create/zfs_create_013_pos.ksh \
functional/cli_root/zfs_create/zfs_create_014_pos.ksh \
functional/cli_root/zfs_create/zfs_create_crypt_combos.ksh \
functional/cli_root/zfs_create/zfs_create_dryrun.ksh \
functional/cli_root/zfs_create/zfs_create_encrypted.ksh \
functional/cli_root/zfs_create/zfs_create_nomount.ksh \
functional/cli_root/zfs_create/zfs_create_verbose.ksh \
functional/cli_root/zfs_destroy/cleanup.ksh \
functional/cli_root/zfs_destroy/setup.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_dedup.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_002_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_003_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_006_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_007_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_008_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_009_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_010_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_011_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_012_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_013_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal.ksh \
functional/cli_root/zfs_diff/cleanup.ksh \
functional/cli_root/zfs_diff/setup.ksh \
functional/cli_root/zfs_diff/zfs_diff_changes.ksh \
functional/cli_root/zfs_diff/zfs_diff_cliargs.ksh \
functional/cli_root/zfs_diff/zfs_diff_encrypted.ksh \
functional/cli_root/zfs_diff/zfs_diff_mangle.ksh \
functional/cli_root/zfs_diff/zfs_diff_timestamp.ksh \
functional/cli_root/zfs_diff/zfs_diff_types.ksh \
functional/cli_root/zfs_get/cleanup.ksh \
functional/cli_root/zfs_get/setup.ksh \
functional/cli_root/zfs_get/zfs_get_001_pos.ksh \
functional/cli_root/zfs_get/zfs_get_002_pos.ksh \
functional/cli_root/zfs_get/zfs_get_003_pos.ksh \
functional/cli_root/zfs_get/zfs_get_004_pos.ksh \
functional/cli_root/zfs_get/zfs_get_005_neg.ksh \
functional/cli_root/zfs_get/zfs_get_006_neg.ksh \
functional/cli_root/zfs_get/zfs_get_007_neg.ksh \
functional/cli_root/zfs_get/zfs_get_008_pos.ksh \
functional/cli_root/zfs_get/zfs_get_009_pos.ksh \
functional/cli_root/zfs_get/zfs_get_010_neg.ksh \
functional/cli_root/zfs_ids_to_path/cleanup.ksh \
functional/cli_root/zfs_ids_to_path/setup.ksh \
functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh \
functional/cli_root/zfs_inherit/cleanup.ksh \
functional/cli_root/zfs_inherit/setup.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_001_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_mountpoint.ksh \
functional/cli_root/zfs_jail/cleanup.ksh \
functional/cli_root/zfs_jail/setup.ksh \
functional/cli_root/zfs_jail/zfs_jail_001_pos.ksh \
functional/cli_root/zfs_load-key/cleanup.ksh \
functional/cli_root/zfs_load-key/setup.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_all.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_file.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_https.ksh \
functional/cli_root/zfs_load-key/zfs_load-key.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_location.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_noop.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh \
functional/cli_root/zfs_mount/cleanup.ksh \
functional/cli_root/zfs_mount/setup.ksh \
functional/cli_root/zfs_mount/zfs_mount_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_002_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_003_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_004_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_005_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_006_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_009_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_011_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_012_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_013_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_014_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_mountpoints.ksh \
functional/cli_root/zfs_mount/zfs_mount_encrypted.ksh \
functional/cli_root/zfs_mount/zfs_mount_remount.ksh \
functional/cli_root/zfs_mount/zfs_mount_test_race.ksh \
functional/cli_root/zfs_mount/zfs_multi_mount.ksh \
functional/cli_root/zfs_program/cleanup.ksh \
functional/cli_root/zfs_program/setup.ksh \
functional/cli_root/zfs_program/zfs_program_json.ksh \
functional/cli_root/zfs_promote/cleanup.ksh \
functional/cli_root/zfs_promote/setup.ksh \
functional/cli_root/zfs_promote/zfs_promote_001_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_002_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_003_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_004_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_005_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_007_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_008_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_encryptionroot.ksh \
functional/cli_root/zfs_property/cleanup.ksh \
functional/cli_root/zfs_property/setup.ksh \
functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh \
functional/cli_root/zfs_receive/cleanup.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_aliases.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_override.ksh \
functional/cli_root/zfs_receive/setup.ksh \
functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_002_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_003_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_005_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_006_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_007_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_008_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_009_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_012_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_013_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_014_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_015_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_016_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_-e.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_zstd.ksh \
functional/cli_root/zfs_receive/zfs_receive_new_props.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_-d.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_incremental.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw.ksh \
functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh \
functional/cli_root/zfs_receive/zfs_receive_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_compressed_corrective.ksh \
functional/cli_root/zfs_rename/cleanup.ksh \
functional/cli_root/zfs_rename/setup.ksh \
functional/cli_root/zfs_rename/zfs_rename_001_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_002_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_003_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_008_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_010_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_011_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_012_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_013_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_encrypted_child.ksh \
functional/cli_root/zfs_rename/zfs_rename_mountpoint.ksh \
functional/cli_root/zfs_rename/zfs_rename_nounmount.ksh \
functional/cli_root/zfs_rename/zfs_rename_to_encrypted.ksh \
functional/cli_root/zfs_reservation/cleanup.ksh \
functional/cli_root/zfs_reservation/setup.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_001_pos.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_002_pos.ksh \
functional/cli_root/zfs_rollback/cleanup.ksh \
functional/cli_root/zfs_rollback/setup.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_002_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_003_neg.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_004_neg.ksh \
functional/cli_root/zfs_send/cleanup.ksh \
functional/cli_root/zfs_send/setup.ksh \
functional/cli_root/zfs_send/zfs_send_001_pos.ksh \
functional/cli_root/zfs_send/zfs_send_002_pos.ksh \
functional/cli_root/zfs_send/zfs_send_003_pos.ksh \
functional/cli_root/zfs_send/zfs_send_004_neg.ksh \
functional/cli_root/zfs_send/zfs_send_005_pos.ksh \
functional/cli_root/zfs_send/zfs_send_006_pos.ksh \
functional/cli_root/zfs_send/zfs_send_007_pos.ksh \
functional/cli_root/zfs_send/zfs_send-b.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted_unloaded.ksh \
functional/cli_root/zfs_send/zfs_send_raw.ksh \
functional/cli_root/zfs_send/zfs_send_skip_missing.ksh \
functional/cli_root/zfs_send/zfs_send_sparse.ksh \
functional/cli_root/zfs_set/cache_001_pos.ksh \
functional/cli_root/zfs_set/cache_002_neg.ksh \
functional/cli_root/zfs_set/canmount_001_pos.ksh \
functional/cli_root/zfs_set/canmount_002_pos.ksh \
functional/cli_root/zfs_set/canmount_003_pos.ksh \
functional/cli_root/zfs_set/canmount_004_pos.ksh \
functional/cli_root/zfs_set/checksum_001_pos.ksh \
functional/cli_root/zfs_set/cleanup.ksh \
functional/cli_root/zfs_set/compression_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_002_pos.ksh \
functional/cli_root/zfs_set/mountpoint_003_pos.ksh \
functional/cli_root/zfs_set/onoffs_001_pos.ksh \
functional/cli_root/zfs_set/property_alias_001_pos.ksh \
functional/cli_root/zfs_set/readonly_001_pos.ksh \
functional/cli_root/zfs_set/reservation_001_neg.ksh \
functional/cli_root/zfs_set/ro_props_001_pos.ksh \
functional/cli_root/zfs_set/setup.ksh \
functional/cli_root/zfs_set/share_mount_001_neg.ksh \
functional/cli_root/zfs_set/snapdir_001_pos.ksh \
functional/cli_root/zfs/setup.ksh \
functional/cli_root/zfs_set/user_property_001_pos.ksh \
functional/cli_root/zfs_set/user_property_002_pos.ksh \
functional/cli_root/zfs_set/user_property_003_neg.ksh \
functional/cli_root/zfs_set/user_property_004_pos.ksh \
functional/cli_root/zfs_set/version_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_002_neg.ksh \
functional/cli_root/zfs_set/zfs_set_003_neg.ksh \
functional/cli_root/zfs_set/zfs_set_feature_activation.ksh \
functional/cli_root/zfs_set/zfs_set_keylocation.ksh \
functional/cli_root/zfs_share/cleanup.ksh \
functional/cli_root/zfs_share/setup.ksh \
functional/cli_root/zfs_share/zfs_share_001_pos.ksh \
functional/cli_root/zfs_share/zfs_share_002_pos.ksh \
functional/cli_root/zfs_share/zfs_share_003_pos.ksh \
functional/cli_root/zfs_share/zfs_share_004_pos.ksh \
functional/cli_root/zfs_share/zfs_share_005_pos.ksh \
functional/cli_root/zfs_share/zfs_share_006_pos.ksh \
functional/cli_root/zfs_share/zfs_share_007_neg.ksh \
functional/cli_root/zfs_share/zfs_share_008_neg.ksh \
functional/cli_root/zfs_share/zfs_share_009_neg.ksh \
functional/cli_root/zfs_share/zfs_share_010_neg.ksh \
functional/cli_root/zfs_share/zfs_share_011_pos.ksh \
functional/cli_root/zfs_share/zfs_share_012_pos.ksh \
functional/cli_root/zfs_share/zfs_share_013_pos.ksh \
functional/cli_root/zfs_share/zfs_share_concurrent_shares.ksh \
functional/cli_root/zfs_snapshot/cleanup.ksh \
functional/cli_root/zfs_snapshot/setup.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_001_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_003_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_004_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_005_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_006_pos.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_007_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh \
functional/cli_root/zfs_sysfs/cleanup.ksh \
functional/cli_root/zfs_sysfs/setup.ksh \
functional/cli_root/zfs_sysfs/zfeature_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_sysfs_live.ksh \
functional/cli_root/zfs_sysfs/zpool_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zpool_set_unsupported.ksh \
functional/cli_root/zfs_unload-key/cleanup.ksh \
functional/cli_root/zfs_unload-key/setup.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_all.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_recursive.ksh \
functional/cli_root/zfs_unmount/cleanup.ksh \
functional/cli_root/zfs_unmount/setup.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_002_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_003_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_004_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_006_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_007_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_008_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_nested.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_unload_keys.ksh \
functional/cli_root/zfs_unshare/cleanup.ksh \
functional/cli_root/zfs_unshare/setup.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_003_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_005_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_006_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_007_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_008_pos.ksh \
functional/cli_root/zfs_upgrade/cleanup.ksh \
functional/cli_root/zfs_upgrade/setup.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_002_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_003_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_004_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_005_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_006_neg.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_007_neg.ksh \
functional/cli_root/zfs_wait/cleanup.ksh \
functional/cli_root/zfs_wait/setup.ksh \
functional/cli_root/zfs_wait/zfs_wait_deleteq.ksh \
functional/cli_root/zfs_wait/zfs_wait_getsubopt.ksh \
functional/cli_root/zfs/zfs_001_neg.ksh \
functional/cli_root/zfs/zfs_002_pos.ksh \
functional/cli_root/zfs/zfs_003_neg.ksh \
functional/cli_root/zhack/zhack_label_checksum.ksh \
functional/cli_root/zpool_add/add_nested_replacing_spare.ksh \
functional/cli_root/zpool_add/add-o_ashift.ksh \
functional/cli_root/zpool_add/add_prop_ashift.ksh \
functional/cli_root/zpool_add/cleanup.ksh \
functional/cli_root/zpool_add/setup.ksh \
functional/cli_root/zpool_add/zpool_add_001_pos.ksh \
functional/cli_root/zpool_add/zpool_add_002_pos.ksh \
functional/cli_root/zpool_add/zpool_add_003_pos.ksh \
functional/cli_root/zpool_add/zpool_add_004_pos.ksh \
functional/cli_root/zpool_add/zpool_add_005_pos.ksh \
functional/cli_root/zpool_add/zpool_add_006_pos.ksh \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
functional/cli_root/zpool_attach/setup.ksh \
functional/cli_root/zpool_attach/zpool_attach_001_neg.ksh \
functional/cli_root/zpool/cleanup.ksh \
functional/cli_root/zpool_clear/cleanup.ksh \
functional/cli_root/zpool_clear/setup.ksh \
functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh \
functional/cli_root/zpool_clear/zpool_clear_002_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_003_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_readonly.ksh \
functional/cli_root/zpool_create/cleanup.ksh \
functional/cli_root/zpool_create/create-o_ashift.ksh \
functional/cli_root/zpool_create/setup.ksh \
functional/cli_root/zpool_create/zpool_create_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_007_neg.ksh \
functional/cli_root/zpool_create/zpool_create_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_009_neg.ksh \
functional/cli_root/zpool_create/zpool_create_010_neg.ksh \
functional/cli_root/zpool_create/zpool_create_011_neg.ksh \
functional/cli_root/zpool_create/zpool_create_012_neg.ksh \
functional/cli_root/zpool_create/zpool_create_014_neg.ksh \
functional/cli_root/zpool_create/zpool_create_015_neg.ksh \
functional/cli_root/zpool_create/zpool_create_016_pos.ksh \
functional/cli_root/zpool_create/zpool_create_017_neg.ksh \
functional/cli_root/zpool_create/zpool_create_018_pos.ksh \
functional/cli_root/zpool_create/zpool_create_019_pos.ksh \
functional/cli_root/zpool_create/zpool_create_020_pos.ksh \
functional/cli_root/zpool_create/zpool_create_021_pos.ksh \
functional/cli_root/zpool_create/zpool_create_022_pos.ksh \
functional/cli_root/zpool_create/zpool_create_023_neg.ksh \
functional/cli_root/zpool_create/zpool_create_024_pos.ksh \
functional/cli_root/zpool_create/zpool_create_crypt_combos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_dryrun_output.ksh \
functional/cli_root/zpool_create/zpool_create_encrypted.ksh \
functional/cli_root/zpool_create/zpool_create_features_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_004_neg.ksh \
functional/cli_root/zpool_create/zpool_create_features_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_007_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh \
functional/cli_root/zpool_create/zpool_create_tempname.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_002_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_003_neg.ksh \
functional/cli_root/zpool_detach/cleanup.ksh \
functional/cli_root/zpool_detach/setup.ksh \
functional/cli_root/zpool_detach/zpool_detach_001_neg.ksh \
functional/cli_root/zpool_events/cleanup.ksh \
functional/cli_root/zpool_events/setup.ksh \
functional/cli_root/zpool_events/zpool_events_clear.ksh \
functional/cli_root/zpool_events/zpool_events_clear_retained.ksh \
functional/cli_root/zpool_events/zpool_events_cliargs.ksh \
functional/cli_root/zpool_events/zpool_events_duplicates.ksh \
functional/cli_root/zpool_events/zpool_events_errors.ksh \
functional/cli_root/zpool_events/zpool_events_follow.ksh \
functional/cli_root/zpool_events/zpool_events_poolname.ksh \
functional/cli_root/zpool_expand/cleanup.ksh \
functional/cli_root/zpool_expand/setup.ksh \
functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh \
functional/cli_root/zpool_expand/zpool_expand_004_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_005_pos.ksh \
functional/cli_root/zpool_export/cleanup.ksh \
functional/cli_root/zpool_export/setup.ksh \
functional/cli_root/zpool_export/zpool_export_001_pos.ksh \
functional/cli_root/zpool_export/zpool_export_002_pos.ksh \
functional/cli_root/zpool_export/zpool_export_003_neg.ksh \
functional/cli_root/zpool_export/zpool_export_004_pos.ksh \
functional/cli_root/zpool_get/cleanup.ksh \
functional/cli_root/zpool_get/setup.ksh \
functional/cli_root/zpool_get/zpool_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_002_pos.ksh \
functional/cli_root/zpool_get/zpool_get_003_pos.ksh \
functional/cli_root/zpool_get/zpool_get_004_neg.ksh \
functional/cli_root/zpool_get/zpool_get_005_pos.ksh \
functional/cli_root/zpool_history/cleanup.ksh \
functional/cli_root/zpool_history/setup.ksh \
functional/cli_root/zpool_history/zpool_history_001_neg.ksh \
functional/cli_root/zpool_history/zpool_history_002_pos.ksh \
functional/cli_root/zpool_import/cleanup.ksh \
functional/cli_root/zpool_import/import_cachefile_device_added.ksh \
functional/cli_root/zpool_import/import_cachefile_device_removed.ksh \
functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_attached.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_detached.ksh \
functional/cli_root/zpool_import/import_cachefile_paths_changed.ksh \
functional/cli_root/zpool_import/import_cachefile_shared_device.ksh \
functional/cli_root/zpool_import/import_devices_missing.ksh \
functional/cli_root/zpool_import/import_paths_changed.ksh \
functional/cli_root/zpool_import/import_rewind_config_changed.ksh \
functional/cli_root/zpool_import/import_rewind_device_replaced.ksh \
functional/cli_root/zpool_import/setup.ksh \
functional/cli_root/zpool_import/zpool_import_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_004_pos.ksh \
functional/cli_root/zpool_import/zpool_import_005_pos.ksh \
functional/cli_root/zpool_import/zpool_import_006_pos.ksh \
functional/cli_root/zpool_import/zpool_import_007_pos.ksh \
functional/cli_root/zpool_import/zpool_import_008_pos.ksh \
functional/cli_root/zpool_import/zpool_import_009_neg.ksh \
functional/cli_root/zpool_import/zpool_import_010_pos.ksh \
functional/cli_root/zpool_import/zpool_import_011_neg.ksh \
functional/cli_root/zpool_import/zpool_import_012_pos.ksh \
functional/cli_root/zpool_import/zpool_import_013_neg.ksh \
functional/cli_root/zpool_import/zpool_import_014_pos.ksh \
functional/cli_root/zpool_import/zpool_import_015_pos.ksh \
functional/cli_root/zpool_import/zpool_import_016_pos.ksh \
functional/cli_root/zpool_import/zpool_import_017_pos.ksh \
functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted_load.ksh \
functional/cli_root/zpool_import/zpool_import_errata3.ksh \
functional/cli_root/zpool_import/zpool_import_errata4.ksh \
functional/cli_root/zpool_import/zpool_import_features_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_features_002_neg.ksh \
functional/cli_root/zpool_import/zpool_import_features_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_import_export.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_offline_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_online_offline.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_split.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_neg.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_pos.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_suspend_resume.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_unsupported_vdevs.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_checksums.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_removed.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_valid.ksh \
functional/cli_root/zpool_offline/cleanup.ksh \
functional/cli_root/zpool_offline/setup.ksh \
functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh \
functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh \
functional/cli_root/zpool_offline/zpool_offline_003_pos.ksh \
functional/cli_root/zpool_online/cleanup.ksh \
functional/cli_root/zpool_online/setup.ksh \
functional/cli_root/zpool_online/zpool_online_001_pos.ksh \
functional/cli_root/zpool_online/zpool_online_002_neg.ksh \
functional/cli_root/zpool_remove/cleanup.ksh \
functional/cli_root/zpool_remove/setup.ksh \
functional/cli_root/zpool_remove/zpool_remove_001_neg.ksh \
functional/cli_root/zpool_remove/zpool_remove_002_pos.ksh \
functional/cli_root/zpool_remove/zpool_remove_003_pos.ksh \
functional/cli_root/zpool_reopen/cleanup.ksh \
functional/cli_root/zpool_reopen/setup.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh \
functional/cli_root/zpool_replace/cleanup.ksh \
functional/cli_root/zpool_replace/replace-o_ashift.ksh \
functional/cli_root/zpool_replace/replace_prop_ashift.ksh \
functional/cli_root/zpool_replace/setup.ksh \
functional/cli_root/zpool_replace/zpool_replace_001_neg.ksh \
functional/cli_root/zpool_resilver/cleanup.ksh \
functional/cli_root/zpool_resilver/setup.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh \
functional/cli_root/zpool_scrub/cleanup.ksh \
functional/cli_root/zpool_scrub/setup.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_encrypted_unloaded.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_set/cleanup.ksh \
functional/cli_root/zpool_set/setup.ksh \
functional/cli_root/zpool/setup.ksh \
functional/cli_root/zpool_set/zpool_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_003_neg.ksh \
functional/cli_root/zpool_set/zpool_set_ashift.ksh \
functional/cli_root/zpool_set/zpool_set_features.ksh \
functional/cli_root/zpool_split/cleanup.ksh \
functional/cli_root/zpool_split/setup.ksh \
functional/cli_root/zpool_split/zpool_split_cliargs.ksh \
functional/cli_root/zpool_split/zpool_split_devices.ksh \
functional/cli_root/zpool_split/zpool_split_dryrun_output.ksh \
functional/cli_root/zpool_split/zpool_split_encryption.ksh \
functional/cli_root/zpool_split/zpool_split_indirect.ksh \
functional/cli_root/zpool_split/zpool_split_props.ksh \
functional/cli_root/zpool_split/zpool_split_resilver.ksh \
functional/cli_root/zpool_split/zpool_split_vdevs.ksh \
functional/cli_root/zpool_split/zpool_split_wholedisk.ksh \
functional/cli_root/zpool_status/cleanup.ksh \
functional/cli_root/zpool_status/setup.ksh \
functional/cli_root/zpool_status/zpool_status_001_pos.ksh \
functional/cli_root/zpool_status/zpool_status_002_pos.ksh \
functional/cli_root/zpool_status/zpool_status_003_pos.ksh \
functional/cli_root/zpool_status/zpool_status_004_pos.ksh \
functional/cli_root/zpool_status/zpool_status_005_pos.ksh \
functional/cli_root/zpool_status/zpool_status_features_001_pos.ksh \
functional/cli_root/zpool_sync/cleanup.ksh \
functional/cli_root/zpool_sync/setup.ksh \
functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh \
functional/cli_root/zpool_sync/zpool_sync_002_neg.ksh \
functional/cli_root/zpool_trim/cleanup.ksh \
functional/cli_root/zpool_trim/setup.ksh \
functional/cli_root/zpool_trim/zpool_trim_attach_detach_add_remove.ksh \
functional/cli_root/zpool_trim/zpool_trim_fault_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_import_export.ksh \
functional/cli_root/zpool_trim/zpool_trim_multiple.ksh \
functional/cli_root/zpool_trim/zpool_trim_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_offline_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh \
functional/cli_root/zpool_trim/zpool_trim_partial.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_secure.ksh \
functional/cli_root/zpool_trim/zpool_trim_split.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_pos.ksh \
functional/cli_root/zpool_trim/zpool_trim_suspend_resume.ksh \
functional/cli_root/zpool_trim/zpool_trim_unsupported_vdevs.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_checksums.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh \
functional/cli_root/zpool_upgrade/cleanup.ksh \
functional/cli_root/zpool_upgrade/setup.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_001_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_002_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_003_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_005_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_006_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_008_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_009_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_features_001_pos.ksh \
functional/cli_root/zpool_wait/cleanup.ksh \
functional/cli_root/zpool_wait/scan/cleanup.ksh \
functional/cli_root/zpool_wait/scan/setup.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_rebuild.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_resilver.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_basic.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh \
functional/cli_root/zpool_wait/setup.ksh \
functional/cli_root/zpool_wait/zpool_wait_discard.ksh \
functional/cli_root/zpool_wait/zpool_wait_freeing.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_multiple.ksh \
functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_usage.ksh \
functional/cli_root/zpool/zpool_001_neg.ksh \
functional/cli_root/zpool/zpool_002_pos.ksh \
functional/cli_root/zpool/zpool_003_pos.ksh \
functional/cli_root/zpool/zpool_colors.ksh \
functional/cli_user/misc/arcstat_001_pos.ksh \
functional/cli_user/misc/arc_summary_001_pos.ksh \
functional/cli_user/misc/arc_summary_002_neg.ksh \
+ functional/cli_user/misc/zilstat_001_pos.ksh \
functional/cli_user/misc/cleanup.ksh \
functional/cli_user/misc/setup.ksh \
functional/cli_user/misc/zdb_001_neg.ksh \
functional/cli_user/misc/zfs_001_neg.ksh \
functional/cli_user/misc/zfs_allow_001_neg.ksh \
functional/cli_user/misc/zfs_clone_001_neg.ksh \
functional/cli_user/misc/zfs_create_001_neg.ksh \
functional/cli_user/misc/zfs_destroy_001_neg.ksh \
functional/cli_user/misc/zfs_get_001_neg.ksh \
functional/cli_user/misc/zfs_inherit_001_neg.ksh \
functional/cli_user/misc/zfs_mount_001_neg.ksh \
functional/cli_user/misc/zfs_promote_001_neg.ksh \
functional/cli_user/misc/zfs_receive_001_neg.ksh \
functional/cli_user/misc/zfs_rename_001_neg.ksh \
functional/cli_user/misc/zfs_rollback_001_neg.ksh \
functional/cli_user/misc/zfs_send_001_neg.ksh \
functional/cli_user/misc/zfs_set_001_neg.ksh \
functional/cli_user/misc/zfs_share_001_neg.ksh \
functional/cli_user/misc/zfs_snapshot_001_neg.ksh \
functional/cli_user/misc/zfs_unallow_001_neg.ksh \
functional/cli_user/misc/zfs_unmount_001_neg.ksh \
functional/cli_user/misc/zfs_unshare_001_neg.ksh \
functional/cli_user/misc/zfs_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_001_neg.ksh \
functional/cli_user/misc/zpool_add_001_neg.ksh \
functional/cli_user/misc/zpool_attach_001_neg.ksh \
functional/cli_user/misc/zpool_clear_001_neg.ksh \
functional/cli_user/misc/zpool_create_001_neg.ksh \
functional/cli_user/misc/zpool_destroy_001_neg.ksh \
functional/cli_user/misc/zpool_detach_001_neg.ksh \
functional/cli_user/misc/zpool_export_001_neg.ksh \
functional/cli_user/misc/zpool_get_001_neg.ksh \
functional/cli_user/misc/zpool_history_001_neg.ksh \
functional/cli_user/misc/zpool_import_001_neg.ksh \
functional/cli_user/misc/zpool_import_002_neg.ksh \
functional/cli_user/misc/zpool_offline_001_neg.ksh \
functional/cli_user/misc/zpool_online_001_neg.ksh \
functional/cli_user/misc/zpool_remove_001_neg.ksh \
functional/cli_user/misc/zpool_replace_001_neg.ksh \
functional/cli_user/misc/zpool_scrub_001_neg.ksh \
functional/cli_user/misc/zpool_set_001_neg.ksh \
functional/cli_user/misc/zpool_status_001_neg.ksh \
functional/cli_user/misc/zpool_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_wait_privilege.ksh \
functional/cli_user/zfs_list/cleanup.ksh \
functional/cli_user/zfs_list/setup.ksh \
functional/cli_user/zfs_list/zfs_list_001_pos.ksh \
functional/cli_user/zfs_list/zfs_list_002_pos.ksh \
functional/cli_user/zfs_list/zfs_list_003_pos.ksh \
functional/cli_user/zfs_list/zfs_list_004_neg.ksh \
functional/cli_user/zfs_list/zfs_list_005_neg.ksh \
functional/cli_user/zfs_list/zfs_list_007_pos.ksh \
functional/cli_user/zfs_list/zfs_list_008_neg.ksh \
functional/cli_user/zpool_iostat/cleanup.ksh \
functional/cli_user/zpool_iostat/setup.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_002_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_003_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_004_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_disable.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh \
functional/cli_user/zpool_list/cleanup.ksh \
functional/cli_user/zpool_list/setup.ksh \
functional/cli_user/zpool_list/zpool_list_001_pos.ksh \
functional/cli_user/zpool_list/zpool_list_002_neg.ksh \
functional/cli_user/zpool_status/cleanup.ksh \
functional/cli_user/zpool_status/setup.ksh \
functional/cli_user/zpool_status/zpool_status_003_pos.ksh \
functional/cli_user/zpool_status/zpool_status_-c_disable.ksh \
functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh \
functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh \
functional/compression/cleanup.ksh \
functional/compression/compress_001_pos.ksh \
functional/compression/compress_002_pos.ksh \
functional/compression/compress_003_pos.ksh \
functional/compression/compress_004_pos.ksh \
functional/compression/compress_zstd_bswap.ksh \
functional/compression/l2arc_compressed_arc_disabled.ksh \
functional/compression/l2arc_compressed_arc.ksh \
functional/compression/l2arc_encrypted.ksh \
functional/compression/l2arc_encrypted_no_compressed_arc.ksh \
functional/compression/setup.ksh \
functional/cp_files/cleanup.ksh \
functional/cp_files/cp_files_001_pos.ksh \
functional/cp_files/setup.ksh \
functional/crtime/cleanup.ksh \
functional/crtime/crtime_001_pos.ksh \
functional/crtime/setup.ksh \
functional/ctime/cleanup.ksh \
functional/ctime/ctime_001_pos.ksh \
functional/ctime/setup.ksh \
functional/deadman/deadman_ratelimit.ksh \
functional/deadman/deadman_sync.ksh \
functional/deadman/deadman_zio.ksh \
functional/delegate/cleanup.ksh \
functional/delegate/setup.ksh \
functional/delegate/zfs_allow_001_pos.ksh \
functional/delegate/zfs_allow_002_pos.ksh \
functional/delegate/zfs_allow_003_pos.ksh \
functional/delegate/zfs_allow_004_pos.ksh \
functional/delegate/zfs_allow_005_pos.ksh \
functional/delegate/zfs_allow_006_pos.ksh \
functional/delegate/zfs_allow_007_pos.ksh \
functional/delegate/zfs_allow_008_pos.ksh \
functional/delegate/zfs_allow_009_neg.ksh \
functional/delegate/zfs_allow_010_pos.ksh \
functional/delegate/zfs_allow_011_neg.ksh \
functional/delegate/zfs_allow_012_neg.ksh \
functional/delegate/zfs_unallow_001_pos.ksh \
functional/delegate/zfs_unallow_002_pos.ksh \
functional/delegate/zfs_unallow_003_pos.ksh \
functional/delegate/zfs_unallow_004_pos.ksh \
functional/delegate/zfs_unallow_005_pos.ksh \
functional/delegate/zfs_unallow_006_pos.ksh \
functional/delegate/zfs_unallow_007_neg.ksh \
functional/delegate/zfs_unallow_008_neg.ksh \
functional/devices/cleanup.ksh \
functional/devices/devices_001_pos.ksh \
functional/devices/devices_002_neg.ksh \
functional/devices/devices_003_pos.ksh \
functional/devices/setup.ksh \
functional/dos_attributes/cleanup.ksh \
functional/dos_attributes/read_dos_attrs_001.ksh \
functional/dos_attributes/setup.ksh \
functional/dos_attributes/write_dos_attrs_001.ksh \
functional/events/cleanup.ksh \
functional/events/events_001_pos.ksh \
functional/events/events_002_pos.ksh \
functional/events/setup.ksh \
functional/events/zed_fd_spill.ksh \
functional/events/zed_rc_filter.ksh \
functional/exec/cleanup.ksh \
functional/exec/exec_001_pos.ksh \
functional/exec/exec_002_neg.ksh \
functional/exec/setup.ksh \
+ functional/fadvise/cleanup.ksh \
+ functional/fadvise/fadvise_sequential.ksh \
+ functional/fadvise/setup.ksh \
functional/fallocate/cleanup.ksh \
functional/fallocate/fallocate_prealloc.ksh \
functional/fallocate/fallocate_punch-hole.ksh \
functional/fallocate/fallocate_zero-range.ksh \
functional/fallocate/setup.ksh \
functional/fault/auto_offline_001_pos.ksh \
functional/fault/auto_online_001_pos.ksh \
functional/fault/auto_online_002_pos.ksh \
functional/fault/auto_replace_001_pos.ksh \
functional/fault/auto_spare_001_pos.ksh \
functional/fault/auto_spare_002_pos.ksh \
functional/fault/auto_spare_ashift.ksh \
functional/fault/auto_spare_multiple.ksh \
functional/fault/auto_spare_shared.ksh \
functional/fault/cleanup.ksh \
functional/fault/decompress_fault.ksh \
functional/fault/decrypt_fault.ksh \
functional/fault/scrub_after_resilver.ksh \
functional/fault/setup.ksh \
functional/fault/zpool_status_-s.ksh \
functional/features/async_destroy/async_destroy_001_pos.ksh \
functional/features/async_destroy/cleanup.ksh \
functional/features/async_destroy/setup.ksh \
functional/features/large_dnode/cleanup.ksh \
functional/features/large_dnode/large_dnode_001_pos.ksh \
functional/features/large_dnode/large_dnode_002_pos.ksh \
functional/features/large_dnode/large_dnode_003_pos.ksh \
functional/features/large_dnode/large_dnode_004_neg.ksh \
functional/features/large_dnode/large_dnode_005_pos.ksh \
functional/features/large_dnode/large_dnode_006_pos.ksh \
functional/features/large_dnode/large_dnode_007_neg.ksh \
functional/features/large_dnode/large_dnode_008_pos.ksh \
functional/features/large_dnode/large_dnode_009_pos.ksh \
functional/features/large_dnode/setup.ksh \
functional/grow/grow_pool_001_pos.ksh \
functional/grow/grow_replicas_001_pos.ksh \
functional/history/cleanup.ksh \
functional/history/history_001_pos.ksh \
functional/history/history_002_pos.ksh \
functional/history/history_003_pos.ksh \
functional/history/history_004_pos.ksh \
functional/history/history_005_neg.ksh \
functional/history/history_006_neg.ksh \
functional/history/history_007_pos.ksh \
functional/history/history_008_pos.ksh \
functional/history/history_009_pos.ksh \
functional/history/history_010_pos.ksh \
functional/history/setup.ksh \
functional/inheritance/cleanup.ksh \
functional/inheritance/inherit_001_pos.ksh \
functional/inuse/inuse_001_pos.ksh \
functional/inuse/inuse_003_pos.ksh \
functional/inuse/inuse_004_pos.ksh \
functional/inuse/inuse_005_pos.ksh \
functional/inuse/inuse_006_pos.ksh \
functional/inuse/inuse_007_pos.ksh \
functional/inuse/inuse_008_pos.ksh \
functional/inuse/inuse_009_pos.ksh \
functional/inuse/setup.ksh \
functional/io/cleanup.ksh \
functional/io/io_uring.ksh \
functional/io/libaio.ksh \
functional/io/mmap.ksh \
functional/io/posixaio.ksh \
functional/io/psync.ksh \
functional/io/setup.ksh \
functional/io/sync.ksh \
functional/l2arc/cleanup.ksh \
functional/l2arc/l2arc_arcstats_pos.ksh \
functional/l2arc/l2arc_l2miss_pos.ksh \
functional/l2arc/l2arc_mfuonly_pos.ksh \
functional/l2arc/persist_l2arc_001_pos.ksh \
functional/l2arc/persist_l2arc_002_pos.ksh \
functional/l2arc/persist_l2arc_003_neg.ksh \
functional/l2arc/persist_l2arc_004_pos.ksh \
functional/l2arc/persist_l2arc_005_pos.ksh \
functional/l2arc/setup.ksh \
functional/large_files/cleanup.ksh \
functional/large_files/large_files_001_pos.ksh \
functional/large_files/large_files_002_pos.ksh \
functional/large_files/setup.ksh \
functional/largest_pool/largest_pool_001_pos.ksh \
functional/libzfs/cleanup.ksh \
functional/libzfs/libzfs_input.ksh \
functional/libzfs/setup.ksh \
functional/limits/cleanup.ksh \
functional/limits/filesystem_count.ksh \
functional/limits/filesystem_limit.ksh \
functional/limits/setup.ksh \
functional/limits/snapshot_count.ksh \
functional/limits/snapshot_limit.ksh \
functional/link_count/cleanup.ksh \
functional/link_count/link_count_001.ksh \
functional/link_count/link_count_root_inode.ksh \
functional/link_count/setup.ksh \
functional/log_spacemap/log_spacemap_import_logs.ksh \
functional/migration/cleanup.ksh \
functional/migration/migration_001_pos.ksh \
functional/migration/migration_002_pos.ksh \
functional/migration/migration_003_pos.ksh \
functional/migration/migration_004_pos.ksh \
functional/migration/migration_005_pos.ksh \
functional/migration/migration_006_pos.ksh \
functional/migration/migration_007_pos.ksh \
functional/migration/migration_008_pos.ksh \
functional/migration/migration_009_pos.ksh \
functional/migration/migration_010_pos.ksh \
functional/migration/migration_011_pos.ksh \
functional/migration/migration_012_pos.ksh \
functional/migration/setup.ksh \
functional/mmap/cleanup.ksh \
functional/mmap/mmap_libaio_001_pos.ksh \
functional/mmap/mmap_read_001_pos.ksh \
functional/mmap/mmap_seek_001_pos.ksh \
functional/mmap/mmap_sync_001_pos.ksh \
functional/mmap/mmap_write_001_pos.ksh \
functional/mmap/setup.ksh \
functional/mmp/cleanup.ksh \
functional/mmp/mmp_active_import.ksh \
functional/mmp/mmp_exported_import.ksh \
functional/mmp/mmp_hostid.ksh \
functional/mmp/mmp_inactive_import.ksh \
functional/mmp/mmp_interval.ksh \
functional/mmp/mmp_on_off.ksh \
functional/mmp/mmp_on_thread.ksh \
functional/mmp/mmp_on_uberblocks.ksh \
functional/mmp/mmp_on_zdb.ksh \
functional/mmp/mmp_reset_interval.ksh \
functional/mmp/mmp_write_distribution.ksh \
functional/mmp/mmp_write_uberblocks.ksh \
functional/mmp/multihost_history.ksh \
functional/mmp/setup.ksh \
functional/mount/cleanup.ksh \
functional/mount/setup.ksh \
functional/mount/umount_001.ksh \
functional/mount/umountall_001.ksh \
functional/mount/umount_unlinked_drain.ksh \
functional/mv_files/cleanup.ksh \
functional/mv_files/mv_files_001_pos.ksh \
functional/mv_files/mv_files_002_pos.ksh \
functional/mv_files/random_creation.ksh \
functional/mv_files/setup.ksh \
functional/nestedfs/cleanup.ksh \
functional/nestedfs/nestedfs_001_pos.ksh \
functional/nestedfs/setup.ksh \
functional/nopwrite/cleanup.ksh \
functional/nopwrite/nopwrite_copies.ksh \
functional/nopwrite/nopwrite_mtime.ksh \
functional/nopwrite/nopwrite_negative.ksh \
functional/nopwrite/nopwrite_promoted_clone.ksh \
functional/nopwrite/nopwrite_recsize.ksh \
functional/nopwrite/nopwrite_sync.ksh \
functional/nopwrite/nopwrite_varying_compression.ksh \
functional/nopwrite/nopwrite_volume.ksh \
functional/nopwrite/setup.ksh \
functional/no_space/cleanup.ksh \
functional/no_space/enospc_001_pos.ksh \
functional/no_space/enospc_002_pos.ksh \
functional/no_space/enospc_003_pos.ksh \
functional/no_space/enospc_df.ksh \
functional/no_space/enospc_rm.ksh \
functional/no_space/setup.ksh \
functional/online_offline/cleanup.ksh \
functional/online_offline/online_offline_001_pos.ksh \
functional/online_offline/online_offline_002_neg.ksh \
functional/online_offline/online_offline_003_neg.ksh \
functional/online_offline/setup.ksh \
functional/pam/cleanup.ksh \
functional/pam/pam_basic.ksh \
functional/pam/pam_nounmount.ksh \
functional/pam/pam_short_password.ksh \
functional/pam/setup.ksh \
functional/pool_checkpoint/checkpoint_after_rewind.ksh \
functional/pool_checkpoint/checkpoint_big_rewind.ksh \
functional/pool_checkpoint/checkpoint_capacity.ksh \
functional/pool_checkpoint/checkpoint_conf_change.ksh \
functional/pool_checkpoint/checkpoint_discard_busy.ksh \
functional/pool_checkpoint/checkpoint_discard.ksh \
functional/pool_checkpoint/checkpoint_discard_many.ksh \
functional/pool_checkpoint/checkpoint_indirect.ksh \
functional/pool_checkpoint/checkpoint_invalid.ksh \
functional/pool_checkpoint/checkpoint_lun_expsz.ksh \
functional/pool_checkpoint/checkpoint_open.ksh \
functional/pool_checkpoint/checkpoint_removal.ksh \
functional/pool_checkpoint/checkpoint_rewind.ksh \
functional/pool_checkpoint/checkpoint_ro_rewind.ksh \
functional/pool_checkpoint/checkpoint_sm_scale.ksh \
functional/pool_checkpoint/checkpoint_twice.ksh \
functional/pool_checkpoint/checkpoint_vdev_add.ksh \
functional/pool_checkpoint/checkpoint_zdb.ksh \
functional/pool_checkpoint/checkpoint_zhack_feat.ksh \
functional/pool_checkpoint/cleanup.ksh \
functional/pool_checkpoint/setup.ksh \
functional/pool_names/pool_names_001_pos.ksh \
functional/pool_names/pool_names_002_neg.ksh \
functional/poolversion/cleanup.ksh \
functional/poolversion/poolversion_001_pos.ksh \
functional/poolversion/poolversion_002_pos.ksh \
functional/poolversion/setup.ksh \
functional/privilege/cleanup.ksh \
functional/privilege/privilege_001_pos.ksh \
functional/privilege/privilege_002_pos.ksh \
functional/privilege/setup.ksh \
functional/procfs/cleanup.ksh \
functional/procfs/pool_state.ksh \
functional/procfs/procfs_list_basic.ksh \
functional/procfs/procfs_list_concurrent_readers.ksh \
functional/procfs/procfs_list_stale_read.ksh \
functional/procfs/setup.ksh \
functional/projectquota/cleanup.ksh \
functional/projectquota/projectid_001_pos.ksh \
functional/projectquota/projectid_002_pos.ksh \
functional/projectquota/projectid_003_pos.ksh \
functional/projectquota/projectquota_001_pos.ksh \
functional/projectquota/projectquota_002_pos.ksh \
functional/projectquota/projectquota_003_pos.ksh \
functional/projectquota/projectquota_004_neg.ksh \
functional/projectquota/projectquota_005_pos.ksh \
functional/projectquota/projectquota_006_pos.ksh \
functional/projectquota/projectquota_007_pos.ksh \
functional/projectquota/projectquota_008_pos.ksh \
functional/projectquota/projectquota_009_pos.ksh \
functional/projectquota/projectspace_001_pos.ksh \
functional/projectquota/projectspace_002_pos.ksh \
functional/projectquota/projectspace_003_pos.ksh \
functional/projectquota/projectspace_004_pos.ksh \
functional/projectquota/projecttree_001_pos.ksh \
functional/projectquota/projecttree_002_pos.ksh \
functional/projectquota/projecttree_003_neg.ksh \
functional/projectquota/setup.ksh \
functional/quota/cleanup.ksh \
functional/quota/quota_001_pos.ksh \
functional/quota/quota_002_pos.ksh \
functional/quota/quota_003_pos.ksh \
functional/quota/quota_004_pos.ksh \
functional/quota/quota_005_pos.ksh \
functional/quota/quota_006_neg.ksh \
functional/quota/setup.ksh \
functional/raidz/cleanup.ksh \
functional/raidz/raidz_001_neg.ksh \
functional/raidz/raidz_002_pos.ksh \
functional/raidz/raidz_003_pos.ksh \
functional/raidz/raidz_004_pos.ksh \
functional/raidz/setup.ksh \
functional/redacted_send/cleanup.ksh \
functional/redacted_send/redacted_compressed.ksh \
functional/redacted_send/redacted_contents.ksh \
functional/redacted_send/redacted_deleted.ksh \
functional/redacted_send/redacted_disabled_feature.ksh \
functional/redacted_send/redacted_embedded.ksh \
functional/redacted_send/redacted_holes.ksh \
functional/redacted_send/redacted_incrementals.ksh \
functional/redacted_send/redacted_largeblocks.ksh \
functional/redacted_send/redacted_many_clones.ksh \
functional/redacted_send/redacted_mixed_recsize.ksh \
functional/redacted_send/redacted_mounts.ksh \
functional/redacted_send/redacted_negative.ksh \
functional/redacted_send/redacted_origin.ksh \
functional/redacted_send/redacted_panic.ksh \
functional/redacted_send/redacted_props.ksh \
functional/redacted_send/redacted_resume.ksh \
functional/redacted_send/redacted_size.ksh \
functional/redacted_send/redacted_volume.ksh \
functional/redacted_send/setup.ksh \
functional/redundancy/cleanup.ksh \
functional/redundancy/redundancy_draid1.ksh \
functional/redundancy/redundancy_draid2.ksh \
functional/redundancy/redundancy_draid3.ksh \
functional/redundancy/redundancy_draid_damaged1.ksh \
functional/redundancy/redundancy_draid_damaged2.ksh \
functional/redundancy/redundancy_draid.ksh \
functional/redundancy/redundancy_draid_spare1.ksh \
functional/redundancy/redundancy_draid_spare2.ksh \
functional/redundancy/redundancy_draid_spare3.ksh \
functional/redundancy/redundancy_mirror.ksh \
functional/redundancy/redundancy_raidz1.ksh \
functional/redundancy/redundancy_raidz2.ksh \
functional/redundancy/redundancy_raidz3.ksh \
functional/redundancy/redundancy_raidz.ksh \
functional/redundancy/redundancy_stripe.ksh \
functional/redundancy/setup.ksh \
functional/refquota/cleanup.ksh \
functional/refquota/refquota_001_pos.ksh \
functional/refquota/refquota_002_pos.ksh \
functional/refquota/refquota_003_pos.ksh \
functional/refquota/refquota_004_pos.ksh \
functional/refquota/refquota_005_pos.ksh \
functional/refquota/refquota_006_neg.ksh \
functional/refquota/refquota_007_neg.ksh \
functional/refquota/refquota_008_neg.ksh \
functional/refquota/setup.ksh \
functional/refreserv/cleanup.ksh \
functional/refreserv/refreserv_001_pos.ksh \
functional/refreserv/refreserv_002_pos.ksh \
functional/refreserv/refreserv_003_pos.ksh \
functional/refreserv/refreserv_004_pos.ksh \
functional/refreserv/refreserv_005_pos.ksh \
functional/refreserv/refreserv_multi_raidz.ksh \
functional/refreserv/refreserv_raidz.ksh \
functional/refreserv/setup.ksh \
functional/removal/cleanup.ksh \
functional/removal/removal_all_vdev.ksh \
functional/removal/removal_cancel.ksh \
functional/removal/removal_check_space.ksh \
functional/removal/removal_condense_export.ksh \
functional/removal/removal_multiple_indirection.ksh \
functional/removal/removal_nopwrite.ksh \
functional/removal/removal_remap_deadlists.ksh \
functional/removal/removal_reservation.ksh \
functional/removal/removal_resume_export.ksh \
functional/removal/removal_sanity.ksh \
functional/removal/removal_with_add.ksh \
functional/removal/removal_with_create_fs.ksh \
functional/removal/removal_with_dedup.ksh \
functional/removal/removal_with_errors.ksh \
functional/removal/removal_with_export.ksh \
functional/removal/removal_with_faulted.ksh \
functional/removal/removal_with_ganging.ksh \
functional/removal/removal_with_remove.ksh \
functional/removal/removal_with_scrub.ksh \
functional/removal/removal_with_send.ksh \
functional/removal/removal_with_send_recv.ksh \
functional/removal/removal_with_snapshot.ksh \
functional/removal/removal_with_write.ksh \
functional/removal/removal_with_zdb.ksh \
functional/removal/remove_attach_mirror.ksh \
functional/removal/remove_expanded.ksh \
functional/removal/remove_indirect.ksh \
functional/removal/remove_mirror.ksh \
functional/removal/remove_mirror_sanity.ksh \
functional/removal/remove_raidz.ksh \
functional/rename_dirs/cleanup.ksh \
functional/rename_dirs/rename_dirs_001_pos.ksh \
functional/rename_dirs/setup.ksh \
functional/replacement/attach_import.ksh \
functional/replacement/attach_multiple.ksh \
functional/replacement/attach_rebuild.ksh \
functional/replacement/attach_resilver.ksh \
functional/replacement/cleanup.ksh \
functional/replacement/detach.ksh \
functional/replacement/rebuild_disabled_feature.ksh \
functional/replacement/rebuild_multiple.ksh \
functional/replacement/rebuild_raidz.ksh \
functional/replacement/replace_import.ksh \
functional/replacement/replace_rebuild.ksh \
functional/replacement/replace_resilver.ksh \
functional/replacement/resilver_restart_001.ksh \
functional/replacement/resilver_restart_002.ksh \
functional/replacement/scrub_cancel.ksh \
functional/replacement/setup.ksh \
functional/reservation/cleanup.ksh \
functional/reservation/reservation_001_pos.ksh \
functional/reservation/reservation_002_pos.ksh \
functional/reservation/reservation_003_pos.ksh \
functional/reservation/reservation_004_pos.ksh \
functional/reservation/reservation_005_pos.ksh \
functional/reservation/reservation_006_pos.ksh \
functional/reservation/reservation_007_pos.ksh \
functional/reservation/reservation_008_pos.ksh \
functional/reservation/reservation_009_pos.ksh \
functional/reservation/reservation_010_pos.ksh \
functional/reservation/reservation_011_pos.ksh \
functional/reservation/reservation_012_pos.ksh \
functional/reservation/reservation_013_pos.ksh \
functional/reservation/reservation_014_pos.ksh \
functional/reservation/reservation_015_pos.ksh \
functional/reservation/reservation_016_pos.ksh \
functional/reservation/reservation_017_pos.ksh \
functional/reservation/reservation_018_pos.ksh \
functional/reservation/reservation_019_pos.ksh \
functional/reservation/reservation_020_pos.ksh \
functional/reservation/reservation_021_neg.ksh \
functional/reservation/reservation_022_pos.ksh \
functional/reservation/setup.ksh \
functional/rootpool/cleanup.ksh \
functional/rootpool/rootpool_002_neg.ksh \
functional/rootpool/rootpool_003_neg.ksh \
functional/rootpool/rootpool_007_pos.ksh \
functional/rootpool/setup.ksh \
functional/rsend/cleanup.ksh \
functional/rsend/recv_dedup_encrypted_zvol.ksh \
functional/rsend/recv_dedup.ksh \
functional/rsend/rsend_001_pos.ksh \
functional/rsend/rsend_002_pos.ksh \
functional/rsend/rsend_003_pos.ksh \
functional/rsend/rsend_004_pos.ksh \
functional/rsend/rsend_005_pos.ksh \
functional/rsend/rsend_006_pos.ksh \
functional/rsend/rsend_007_pos.ksh \
functional/rsend/rsend_008_pos.ksh \
functional/rsend/rsend_009_pos.ksh \
functional/rsend/rsend_010_pos.ksh \
functional/rsend/rsend_011_pos.ksh \
functional/rsend/rsend_012_pos.ksh \
functional/rsend/rsend_013_pos.ksh \
functional/rsend/rsend_014_pos.ksh \
functional/rsend/rsend_016_neg.ksh \
functional/rsend/rsend_019_pos.ksh \
functional/rsend/rsend_020_pos.ksh \
functional/rsend/rsend_021_pos.ksh \
functional/rsend/rsend_022_pos.ksh \
functional/rsend/rsend_024_pos.ksh \
functional/rsend/rsend_025_pos.ksh \
functional/rsend/rsend_026_neg.ksh \
functional/rsend/rsend_027_pos.ksh \
functional/rsend/rsend_028_neg.ksh \
functional/rsend/rsend_029_neg.ksh \
functional/rsend/send-c_embedded_blocks.ksh \
functional/rsend/send-c_incremental.ksh \
functional/rsend/send-c_lz4_disabled.ksh \
functional/rsend/send-c_mixed_compression.ksh \
functional/rsend/send-cpL_varied_recsize.ksh \
functional/rsend/send-c_props.ksh \
functional/rsend/send-c_recv_dedup.ksh \
functional/rsend/send-c_recv_lz4_disabled.ksh \
functional/rsend/send-c_resume.ksh \
functional/rsend/send-c_stream_size_estimate.ksh \
functional/rsend/send-c_verify_contents.ksh \
functional/rsend/send-c_verify_ratio.ksh \
functional/rsend/send-c_volume.ksh \
functional/rsend/send-c_zstreamdump.ksh \
functional/rsend/send_doall.ksh \
functional/rsend/send_encrypted_files.ksh \
functional/rsend/send_encrypted_hierarchy.ksh \
functional/rsend/send_encrypted_props.ksh \
functional/rsend/send_encrypted_truncated_files.ksh \
functional/rsend/send_freeobjects.ksh \
functional/rsend/send_holds.ksh \
functional/rsend/send_hole_birth.ksh \
functional/rsend/send_invalid.ksh \
functional/rsend/send-L_toggle.ksh \
functional/rsend/send_mixed_raw.ksh \
functional/rsend/send_partial_dataset.ksh \
functional/rsend/send_raw_ashift.ksh \
functional/rsend/send_raw_spill_block.ksh \
functional/rsend/send_realloc_dnode_size.ksh \
functional/rsend/send_realloc_encrypted_files.ksh \
functional/rsend/send_realloc_files.ksh \
functional/rsend/send_spill_block.ksh \
functional/rsend/send-wR_encrypted_zvol.ksh \
functional/rsend/setup.ksh \
functional/scrub_mirror/cleanup.ksh \
functional/scrub_mirror/scrub_mirror_001_pos.ksh \
functional/scrub_mirror/scrub_mirror_002_pos.ksh \
functional/scrub_mirror/scrub_mirror_003_pos.ksh \
functional/scrub_mirror/scrub_mirror_004_pos.ksh \
functional/scrub_mirror/setup.ksh \
functional/slog/cleanup.ksh \
functional/slog/setup.ksh \
functional/slog/slog_001_pos.ksh \
functional/slog/slog_002_pos.ksh \
functional/slog/slog_003_pos.ksh \
functional/slog/slog_004_pos.ksh \
functional/slog/slog_005_pos.ksh \
functional/slog/slog_006_pos.ksh \
functional/slog/slog_007_pos.ksh \
functional/slog/slog_008_neg.ksh \
functional/slog/slog_009_neg.ksh \
functional/slog/slog_010_neg.ksh \
functional/slog/slog_011_neg.ksh \
functional/slog/slog_012_neg.ksh \
functional/slog/slog_013_pos.ksh \
functional/slog/slog_014_pos.ksh \
functional/slog/slog_015_neg.ksh \
functional/slog/slog_016_pos.ksh \
functional/slog/slog_replay_fs_001.ksh \
functional/slog/slog_replay_fs_002.ksh \
functional/slog/slog_replay_volume.ksh \
functional/snapshot/cleanup.ksh \
functional/snapshot/clone_001_pos.ksh \
functional/snapshot/rollback_001_pos.ksh \
functional/snapshot/rollback_002_pos.ksh \
functional/snapshot/rollback_003_pos.ksh \
functional/snapshot/setup.ksh \
functional/snapshot/snapshot_001_pos.ksh \
functional/snapshot/snapshot_002_pos.ksh \
functional/snapshot/snapshot_003_pos.ksh \
functional/snapshot/snapshot_004_pos.ksh \
functional/snapshot/snapshot_005_pos.ksh \
functional/snapshot/snapshot_006_pos.ksh \
functional/snapshot/snapshot_007_pos.ksh \
functional/snapshot/snapshot_008_pos.ksh \
functional/snapshot/snapshot_009_pos.ksh \
functional/snapshot/snapshot_010_pos.ksh \
functional/snapshot/snapshot_011_pos.ksh \
functional/snapshot/snapshot_012_pos.ksh \
functional/snapshot/snapshot_013_pos.ksh \
functional/snapshot/snapshot_014_pos.ksh \
functional/snapshot/snapshot_015_pos.ksh \
functional/snapshot/snapshot_016_pos.ksh \
functional/snapshot/snapshot_017_pos.ksh \
functional/snapshot/snapshot_018_pos.ksh \
functional/snapused/cleanup.ksh \
functional/snapused/setup.ksh \
functional/snapused/snapused_001_pos.ksh \
functional/snapused/snapused_002_pos.ksh \
functional/snapused/snapused_003_pos.ksh \
functional/snapused/snapused_004_pos.ksh \
functional/snapused/snapused_005_pos.ksh \
functional/sparse/cleanup.ksh \
functional/sparse/setup.ksh \
functional/sparse/sparse_001_pos.ksh \
functional/stat/cleanup.ksh \
functional/stat/setup.ksh \
functional/stat/stat_001_pos.ksh \
functional/suid/cleanup.ksh \
functional/suid/setup.ksh \
functional/suid/suid_write_to_none.ksh \
functional/suid/suid_write_to_sgid.ksh \
functional/suid/suid_write_to_suid.ksh \
functional/suid/suid_write_to_suid_sgid.ksh \
functional/suid/suid_write_zil_replay.ksh \
functional/trim/autotrim_config.ksh \
functional/trim/autotrim_integrity.ksh \
functional/trim/autotrim_trim_integrity.ksh \
functional/trim/cleanup.ksh \
functional/trim/setup.ksh \
functional/trim/trim_config.ksh \
functional/trim/trim_integrity.ksh \
functional/trim/trim_l2arc.ksh \
functional/truncate/cleanup.ksh \
functional/truncate/setup.ksh \
functional/truncate/truncate_001_pos.ksh \
functional/truncate/truncate_002_pos.ksh \
functional/truncate/truncate_timestamps.ksh \
functional/upgrade/cleanup.ksh \
functional/upgrade/setup.ksh \
functional/upgrade/upgrade_projectquota_001_pos.ksh \
functional/upgrade/upgrade_readonly_pool.ksh \
functional/upgrade/upgrade_userobj_001_pos.ksh \
functional/user_namespace/cleanup.ksh \
functional/user_namespace/setup.ksh \
functional/user_namespace/user_namespace_001.ksh \
functional/user_namespace/user_namespace_002.ksh \
functional/user_namespace/user_namespace_003.ksh \
functional/user_namespace/user_namespace_004.ksh \
functional/userquota/cleanup.ksh \
functional/userquota/groupspace_001_pos.ksh \
functional/userquota/groupspace_002_pos.ksh \
functional/userquota/groupspace_003_pos.ksh \
functional/userquota/setup.ksh \
functional/userquota/userquota_001_pos.ksh \
functional/userquota/userquota_002_pos.ksh \
functional/userquota/userquota_003_pos.ksh \
functional/userquota/userquota_004_pos.ksh \
functional/userquota/userquota_005_neg.ksh \
functional/userquota/userquota_006_pos.ksh \
functional/userquota/userquota_007_pos.ksh \
functional/userquota/userquota_008_pos.ksh \
functional/userquota/userquota_009_pos.ksh \
functional/userquota/userquota_010_pos.ksh \
functional/userquota/userquota_011_pos.ksh \
functional/userquota/userquota_012_neg.ksh \
functional/userquota/userquota_013_pos.ksh \
functional/userquota/userspace_001_pos.ksh \
functional/userquota/userspace_002_pos.ksh \
functional/userquota/userspace_003_pos.ksh \
functional/userquota/userspace_encrypted.ksh \
functional/userquota/userspace_send_encrypted.ksh \
functional/vdev_zaps/cleanup.ksh \
functional/vdev_zaps/setup.ksh \
functional/vdev_zaps/vdev_zaps_001_pos.ksh \
functional/vdev_zaps/vdev_zaps_002_pos.ksh \
functional/vdev_zaps/vdev_zaps_003_pos.ksh \
functional/vdev_zaps/vdev_zaps_004_pos.ksh \
functional/vdev_zaps/vdev_zaps_005_pos.ksh \
functional/vdev_zaps/vdev_zaps_006_pos.ksh \
functional/vdev_zaps/vdev_zaps_007_pos.ksh \
functional/write_dirs/cleanup.ksh \
functional/write_dirs/setup.ksh \
functional/write_dirs/write_dirs_001_pos.ksh \
functional/write_dirs/write_dirs_002_pos.ksh \
functional/xattr/cleanup.ksh \
functional/xattr/setup.ksh \
functional/xattr/xattr_001_pos.ksh \
functional/xattr/xattr_002_neg.ksh \
functional/xattr/xattr_003_neg.ksh \
functional/xattr/xattr_004_pos.ksh \
functional/xattr/xattr_005_pos.ksh \
functional/xattr/xattr_006_pos.ksh \
functional/xattr/xattr_007_neg.ksh \
functional/xattr/xattr_008_pos.ksh \
functional/xattr/xattr_009_neg.ksh \
functional/xattr/xattr_010_neg.ksh \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zpool_influxdb/cleanup.ksh \
functional/zpool_influxdb/setup.ksh \
functional/zpool_influxdb/zpool_influxdb.ksh \
functional/zvol/zvol_cli/cleanup.ksh \
functional/zvol/zvol_cli/setup.ksh \
functional/zvol/zvol_cli/zvol_cli_001_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_002_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_003_neg.ksh \
functional/zvol/zvol_ENOSPC/cleanup.ksh \
functional/zvol/zvol_ENOSPC/setup.ksh \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh \
functional/zvol/zvol_misc/cleanup.ksh \
functional/zvol/zvol_misc/setup.ksh \
functional/zvol/zvol_misc/zvol_misc_001_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_002_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_003_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_004_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_005_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_006_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_fua.ksh \
functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh \
functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh \
functional/zvol/zvol_misc/zvol_misc_snapdev.ksh \
functional/zvol/zvol_misc/zvol_misc_trim.ksh \
functional/zvol/zvol_misc/zvol_misc_volmode.ksh \
functional/zvol/zvol_misc/zvol_misc_zil.ksh \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_002_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_003_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_004_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_005_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_006_pos.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.snapshot_rename.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.snapshot_rename.ksh
new file mode 100755
index 000000000000..0561e4b7c635
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.snapshot_rename.ksh
@@ -0,0 +1,41 @@
+#!/bin/ksh -p
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2022 by Andriy Gapon. All rights reserved.
+#
+
+. $STF_SUITE/tests/functional/channel_program/channel_common.kshlib
+
+#
+# DESCRIPTION: Make sure snapshot renaming works in channel programs
+#
+
+verify_runnable "global"
+
+fs=$TESTPOOL/$TESTFS/testchild
+snapname1=testsnap1
+snapname2=testsnap2
+
+function cleanup
+{
+ destroy_dataset $fs "-R"
+}
+
+log_onexit cleanup
+
+log_must zfs create $fs
+
+log_must_program_sync $TESTPOOL \
+ $ZCP_ROOT/synctask_core/tst.snapshot_rename.zcp $fs $snapname1 $snapname2
+
+log_pass "Snapshot renaming works"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.snapshot_rename.zcp b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.snapshot_rename.zcp
new file mode 100644
index 000000000000..ef893d1551d9
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.snapshot_rename.zcp
@@ -0,0 +1,27 @@
+--
+-- This file and its contents are supplied under the terms of the
+-- Common Development and Distribution License ("CDDL"), version 1.0.
+-- You may only use this file in accordance with the terms of version
+-- 1.0 of the CDDL.
+--
+-- A full copy of the text of the CDDL should have accompanied this
+-- source. A copy of the CDDL is also available via the Internet at
+-- http://www.illumos.org/license/CDDL.
+--
+
+--
+-- Copyright (c) 2022 by Andriy Gapon. All rights reserved.
+--
+
+-- This program should be invoked as "zfs program <pool> <prog> <fs> <snap1> <snap2>"
+
+args = ...
+argv = args["argv"]
+assert(zfs.sync.snapshot(argv[1] .. "@" .. argv[2]) == 0)
+assert(zfs.sync.rename_snapshot(argv[1], argv[2], argv[3]) == 0)
+snaps = {}
+for s in zfs.list.snapshots(argv[1]) do
+ table.insert(snaps, s)
+end
+assert(#snaps == 1)
+assert(snaps[1] == (argv[1] .. "@" .. argv[3]))
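The channel program above is driven by log_must_program_sync in the ksh test; outside the harness the same flow can be exercised directly with "zfs program". A minimal sketch, assuming a scratch pool named tank and placeholder snapshot names (none of these names come from the patch):

    # Run the rename channel program by hand against tank/fs.
    zfs create tank/fs
    zfs program tank ./tst.snapshot_rename.zcp tank/fs snapA snapB
    # The program creates tank/fs@snapA, renames it to snapB, and asserts
    # that exactly one snapshot remains, carrying the new name.
    zfs list -H -t snapshot -o name tank/fs

The script path and pool are illustrative; the test harness resolves the real script under $ZCP_ROOT/synctask_core.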
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/filetest_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/filetest_002_pos.ksh
index a0be1c2050b9..23e7aa577484 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/filetest_002_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/filetest_002_pos.ksh
@@ -1,91 +1,91 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2018, 2019 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/properties.shlib
. $STF_SUITE/tests/functional/checksum/default.cfg
# DESCRIPTION:
# Sanity test to make sure checksum algorithms work.
# For each checksum, create a file in the pool using that checksum. Verify
# that there are no checksum errors. Next, for each checksum, create a single
# file in the pool using that checksum, corrupt the file, and verify that we
# correctly catch the checksum errors.
#
# STRATEGY:
# Test 1
# 1. For each checksum:
# 2. Create a file using the checksum
# 3. Corrupt all level 1 blocks in the file
# 4. Export and import the pool
# 5. Verify that there are checksum errors
verify_runnable "both"
function cleanup
{
rm -fr $TESTDIR/*
}
log_assert "Test corrupting files at L1 and seeing checksum errors"
log_onexit cleanup
WRITESZ=1048576
NWRITES=5
# Get a list of vdevs in our pool
set -A array $(get_disklist_fullpath)
# Get the first vdev, since we will corrupt it later
firstvdev=${array[0]}
typeset -i j=1
while [[ $j -lt ${#CHECKSUM_TYPES[*]} ]]; do
type=${CHECKSUM_TYPES[$j]}
log_must zfs set checksum=$type $TESTPOOL
log_must file_write -o overwrite -f $TESTDIR/test_$type \
-b $WRITESZ -c $NWRITES -d R
# Corrupt the level 1 blocks of this file
corrupt_blocks_at_level $TESTDIR/test_$type 1
log_must zpool export $TESTPOOL
log_must zpool import $TESTPOOL
- log_mustnot eval "cat $TESTDIR/test_$type >/dev/null"
+ log_mustnot eval "dd if=$TESTDIR/test_$type of=/dev/null bs=$WRITESZ count=$NWRITES"
cksum=$(zpool status -P -v $TESTPOOL | grep "$firstvdev" | \
awk '{print $5}')
log_assert "Checksum '$type' caught $cksum checksum errors"
log_must [ $cksum -ne 0 ]
rm -f $TESTDIR/test_$type
log_must zpool clear $TESTPOOL
(( j = j + 1 ))
done
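The read-back now uses dd with a block size and count matching the original write, replacing the open-ended cat; the resulting checksum errors are then scraped from the CKSUM column of zpool status. A minimal sketch of that per-vdev check, with a placeholder pool name and vdev path:

    # Count checksum errors reported against one vdev.
    # "zpool status -v" prints NAME STATE READ WRITE CKSUM per vdev,
    # so field 5 of the matching line is the checksum error count.
    pool=tank
    vdev=/var/tmp/vdev1
    cksum=$(zpool status -P -v "$pool" | awk -v d="$vdev" '$1 == d {print $5; exit}')
    cksum=${cksum:-0}
    [ "$cksum" -ne 0 ] && echo "$cksum checksum errors on $vdev"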
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/add-o_ashift.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/add-o_ashift.ksh
index 8d5ce5efa528..0166e84baa18 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/add-o_ashift.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/add-o_ashift.ksh
@@ -1,100 +1,103 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K. All rights reserved.
# Copyright (c) 2020 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
#
# DESCRIPTION:
# 'zpool add -o ashift=<n> ...' should work with different ashift
# values.
#
# STRATEGY:
# 1. Create a pool with default values.
# 2. Verify 'zpool add -o ashift=<n>' works with allowed values (9-16).
# 3. Verify setting kernel tunable for file vdevs works correctly.
# 4. Verify 'zpool add -o ashift=<n>' doesn't accept other invalid values.
#
verify_runnable "global"
function cleanup
{
log_must set_tunable64 VDEV_FILE_PHYSICAL_ASHIFT $orig_ashift
poolexists $TESTPOOL && destroy_pool $TESTPOOL
rm -f $disk1 $disk2
}
log_assert "zpool add -o ashift=<n>' works with different ashift values"
log_onexit cleanup
disk1=$TEST_BASE_DIR/disk1
disk2=$TEST_BASE_DIR/disk2
log_must mkfile $SIZE $disk1
log_must mkfile $SIZE $disk2
+logical_ashift=$(get_tunable VDEV_FILE_LOGICAL_ASHIFT)
orig_ashift=$(get_tunable VDEV_FILE_PHYSICAL_ASHIFT)
+max_auto_ashift=$(get_tunable VDEV_MAX_AUTO_ASHIFT)
typeset ashifts=("9" "10" "11" "12" "13" "14" "15" "16")
for ashift in ${ashifts[@]}
do
log_must zpool create $TESTPOOL $disk1
log_must zpool add -o ashift=$ashift $TESTPOOL $disk2
log_must verify_ashift $disk2 $ashift
# clean things for the next run
log_must zpool destroy $TESTPOOL
log_must zpool labelclear $disk1
log_must zpool labelclear $disk2
#
# Make sure we can also set the ashift using the tunable.
#
log_must zpool create $TESTPOOL $disk1
log_must set_tunable64 VDEV_FILE_PHYSICAL_ASHIFT $ashift
log_must zpool add $TESTPOOL $disk2
- log_must verify_ashift $disk2 $ashift
+ exp=$(( (ashift <= max_auto_ashift) ? ashift : logical_ashift ))
+ log_must verify_ashift $disk2 $exp
# clean things for the next run
log_must set_tunable64 VDEV_FILE_PHYSICAL_ASHIFT $orig_ashift
log_must zpool destroy $TESTPOOL
log_must zpool labelclear $disk1
log_must zpool labelclear $disk2
done
typeset badvals=("off" "on" "1" "8" "17" "1b" "ff" "-")
for badval in ${badvals[@]}
do
log_must zpool create $TESTPOOL $disk1
log_mustnot zpool add -o ashift="$badval" $TESTPOOL $disk2
# clean things for the next run
log_must zpool destroy $TESTPOOL
log_must zpool labelclear $disk1
log_mustnot zpool labelclear $disk2
done
log_pass "zpool add -o ashift=<n>' works with different ashift values"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh
new file mode 100755
index 000000000000..9bf6a94cfc84
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh
@@ -0,0 +1,37 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+is_freebsd && ! python3 -c 'import sysctl' 2>/dev/null && log_unsupported "python3 sysctl module missing"
+
+set -A args "" "-s \",\"" "-v" \
+ "-f time,zcwc,zimnb,zimsb"
+
+log_assert "zilstat generates output and doesn't return an error code"
+
+typeset -i i=0
+while [[ $i -lt ${#args[*]} ]]; do
+ log_must eval "zilstat ${args[i]} > /dev/null"
+ ((i = i + 1))
+done
+log_pass "zilstat generates output and doesn't return an error code"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/cleanup.ksh
new file mode 100755
index 000000000000..8b5b43a74c16
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/cleanup.ksh
@@ -0,0 +1,28 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+
+#
+# Portions Copyright (c) 2022 Information2 Software, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh
new file mode 100755
index 000000000000..7b7d1d379ac6
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh
@@ -0,0 +1,80 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Portions Copyright (c) 2022 Information2 Software, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+
+#
+# DESCRIPTION:
+# Test posix_fadvise.
+#
+# STRATEGY:
+# 1. Set primarycache to metadata in order to disable prefetch
+# 2. Write some data to a file
+# 3. Get the data_size field from arcstats
+# 4. Call file_fadvise with POSIX_FADV_SEQUENTIAL
+# 5. Get the data_size field from arcstats again
+# 6. The latter data_size should be bigger than the former one
+#
+
+# NOTE: if HAVE_FILE_FADVISE is not defined, the former data_size
+# should be less than or equal to the latter one
+
+verify_runnable "global"
+
+FILE=$TESTDIR/$TESTFILE0
+BLKSZ=$(get_prop recordsize $TESTPOOL)
+
+function cleanup
+{
+ log_must zfs set primarycache=all $TESTPOOL
+ [[ -e $TESTDIR ]] && log_must rm -Rf $TESTDIR/*
+}
+
+getstat() {
+ awk -v c="$1" '$1 == c {print $3; exit}' /proc/spl/kstat/zfs/arcstats
+}
+
+log_assert "Ensure fadvise prefetch data"
+
+log_onexit cleanup
+
+log_must zfs set primarycache=metadata $TESTPOOL
+
+log_must file_write -o create -f $FILE -b $BLKSZ -c 1000
+sync_pool $TESTPOOL
+
+data_size1=$(getstat data_size)
+
+log_must file_fadvise -f $FILE -a 2
+sleep 10
+
+data_size2=$(getstat data_size)
+log_note "original data_size is $data_size1, final data_size is $data_size2"
+
+log_must [ $data_size1 -le $data_size2 ]
+
+log_pass "Ensure data could be prefetched"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/setup.ksh
new file mode 100755
index 000000000000..8ddd73307bb4
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fadvise/setup.ksh
@@ -0,0 +1,30 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+
+#
+# Portions Copyright (c) 2022 Information2 Software, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+DISK=${DISKS%% *}
+default_setup_noexit $DISK
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_002_pos.ksh
index e9517bad7131..bd32be9a4ff8 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_002_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_002_pos.ksh
@@ -1,97 +1,97 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 by Intel Corporation. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/fault/fault.cfg
#
# DESCRIPTION:
# Testing Fault Management Agent ZED Logic - Automated Auto-Spare Test when
# drive is faulted due to CHECKSUM ERRORS.
#
# STRATEGY:
# 1. Create a pool with hot spares
# 2. Create a filesystem with the primary cache disable to force reads
# 3. Write a file to the pool to be read back
# 4. Inject CHECKSUM ERRORS on read with a zinject error handler
# 5. Verify the ZED kicks in a hot spare and expected pool/device status
# 6. Clear the fault
# 7. Verify the hot spare is available and expected pool/device status
#
verify_runnable "both"
function cleanup
{
log_must zinject -c all
destroy_pool $TESTPOOL
rm -f $VDEV_FILES $SPARE_FILE
}
log_assert "Testing automated auto-spare FMA test"
log_onexit cleanup
# Events not supported on FreeBSD
if ! is_freebsd; then
# Clear events from previous runs
zed_events_drain
fi
TESTFILE="/$TESTPOOL/$TESTFS/testfile"
for type in "mirror" "raidz" "raidz2"; do
# 1. Create a pool with hot spares
log_must truncate -s $MINVDEVSIZE $VDEV_FILES $SPARE_FILE
log_must zpool create -f $TESTPOOL $type $VDEV_FILES \
spare $SPARE_FILE
# 2. Create a filesystem with the primary cache disable to force reads
log_must zfs create -o primarycache=none $TESTPOOL/$TESTFS
log_must zfs set recordsize=16k $TESTPOOL/$TESTFS
# 3. Write a file to the pool to be read back
log_must dd if=/dev/urandom of=$TESTFILE bs=1M count=64
# 4. Inject CHECKSUM ERRORS on read with a zinject error handler
log_must zinject -d $FAULT_FILE -e corrupt -f 50 -T read $TESTPOOL
- log_must cp $TESTFILE /dev/null
+ log_must dd if=$TESTFILE of=/dev/null bs=1M count=64
# 5. Verify the ZED kicks in a hot spare and expected pool/device status
log_note "Wait for ZED to auto-spare"
log_must wait_vdev_state $TESTPOOL $FAULT_FILE "DEGRADED" 60
log_must wait_vdev_state $TESTPOOL $SPARE_FILE "ONLINE" 60
log_must wait_hotspare_state $TESTPOOL $SPARE_FILE "INUSE"
log_must check_state $TESTPOOL "" "DEGRADED"
# 6. Clear the fault
log_must zinject -c all
log_must zpool clear $TESTPOOL $FAULT_FILE
# 7. Verify the hot spare is available and expected pool/device status
log_must wait_vdev_state $TESTPOOL $FAULT_FILE "ONLINE" 60
log_must wait_hotspare_state $TESTPOOL $SPARE_FILE "AVAIL"
log_must check_state $TESTPOOL "" "ONLINE"
cleanup
done
log_pass "Auto-spare test successful"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_props.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_props.ksh
index 793904db91ca..c0c7b682def9 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_props.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_encrypted_props.ksh
@@ -1,215 +1,223 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2018 by Datto Inc. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# DESCRIPTION:
# Verify that zfs properly handles encryption properties when receiving
# send streams.
#
# STRATEGY:
# 1. Create a few unencrypted and encrypted test datasets with some data
# 2. Take snapshots of these datasets in preparation for sending
# 3. Verify that 'zfs recv -o keylocation=prompt' fails
# 4. Verify that 'zfs recv -x <encryption prop>' fails on a raw send stream
# 5. Verify that encryption properties cannot be changed on incrementals
# 6. Verify that a simple send can be received as an encryption root
# 7. Verify that an unencrypted props send can be received as an
# encryption root
# 8. Verify that an unencrypted recursive send can be received as an
# encryption root
# 9. Verify that an unencrypted props send can be received as an
# encryption child
# 10. Verify that an unencrypted recursive send can be received as an
# encryption child
#
verify_runnable "both"
function cleanup
{
destroy_dataset $TESTPOOL/recv "-r"
destroy_dataset $TESTPOOL/crypt "-r"
destroy_dataset $TESTPOOL/ds "-r"
[[ -f $sendfile ]] && log_must rm $sendfile
[[ -f $keyfile ]] && log_must rm $keyfile
}
log_onexit cleanup
log_assert "'zfs recv' must properly handle encryption properties"
typeset keyfile=/$TESTPOOL/pkey
typeset sendfile=/$TESTPOOL/sendfile
typeset snap=$TESTPOOL/ds@snap1
typeset snap2=$TESTPOOL/ds@snap2
typeset esnap=$TESTPOOL/crypt@snap1
typeset esnap2=$TESTPOOL/crypt@snap2
log_must eval "echo 'password' > $keyfile"
log_must zfs create $TESTPOOL/ds
log_must zfs create $TESTPOOL/ds/ds1
log_must zfs create -o encryption=on -o keyformat=passphrase \
-o keylocation=file://$keyfile $TESTPOOL/crypt
log_must zfs create $TESTPOOL/crypt/ds1
log_must zfs create -o keyformat=passphrase -o keylocation=file://$keyfile \
$TESTPOOL/crypt/ds2
log_must mkfile 1M /$TESTPOOL/ds/$TESTFILE0
log_must cp /$TESTPOOL/ds/$TESTFILE0 /$TESTPOOL/crypt/$TESTFILE0
typeset cksum=$(md5digest /$TESTPOOL/ds/$TESTFILE0)
log_must zfs snap -r $snap
log_must zfs snap -r $snap2
log_must zfs snap -r $esnap
log_must zfs snap -r $esnap2
# Embedded data is incompatible with encrypted datasets, so we cannot
# allow embedded streams to be received.
log_note "Must not be able to receive an embedded stream as encrypted"
log_mustnot eval "zfs send -e $TESTPOOL/crypt/ds1 | zfs recv $TESTPOOL/recv"
# We currently don't have an elegant and secure way to pass the passphrase
# in via prompt because the send stream itself is using stdin.
log_note "Must not be able to use 'keylocation=prompt' on receive"
log_must eval "zfs send $snap > $sendfile"
log_mustnot eval "zfs recv -o encryption=on -o keyformat=passphrase" \
"$TESTPOOL/recv < $sendfile"
log_mustnot eval "zfs recv -o encryption=on -o keyformat=passphrase" \
"-o keylocation=prompt $TESTPOOL/recv < $sendfile"
# Raw sends do not have access to the decrypted data so we cannot override
# the encryption settings without losing the data.
log_note "Must not be able to disable encryption properties on raw send"
log_must eval "zfs send -w $esnap > $sendfile"
log_mustnot eval "zfs recv -x encryption $TESTPOOL/recv < $sendfile"
log_mustnot eval "zfs recv -x keyformat $TESTPOOL/recv < $sendfile"
log_mustnot eval "zfs recv -x pbkdf2iters $TESTPOOL/recv < $sendfile"
# Encryption properties are set upon creating the dataset. Changing them
# afterwards requires using 'zfs change-key' or an update from a raw send.
log_note "Must not be able to change encryption properties on incrementals"
log_must eval "zfs send $esnap | zfs recv -o encryption=on" \
"-o keyformat=passphrase -o keylocation=file://$keyfile $TESTPOOL/recv"
log_mustnot eval "zfs send -i $esnap $esnap2 |" \
"zfs recv -o encryption=aes-128-ccm $TESTPOOL/recv"
log_mustnot eval "zfs send -i $esnap $esnap2 |" \
"zfs recv -o keyformat=hex $TESTPOOL/recv"
log_mustnot eval "zfs send -i $esnap $esnap2 |" \
"zfs recv -o pbkdf2iters=100k $TESTPOOL/recv"
log_must zfs destroy -r $TESTPOOL/recv
# Test that we can receive a simple stream as an encryption root.
log_note "Must be able to receive stream as encryption root"
ds=$TESTPOOL/recv
log_must eval "zfs send $snap > $sendfile"
log_must eval "zfs recv -o encryption=on -o keyformat=passphrase" \
"-o keylocation=file://$keyfile $ds < $sendfile"
log_must test "$(get_prop 'encryption' $ds)" == "aes-256-gcm"
log_must test "$(get_prop 'encryptionroot' $ds)" == "$ds"
log_must test "$(get_prop 'keyformat' $ds)" == "passphrase"
log_must test "$(get_prop 'keylocation' $ds)" == "file://$keyfile"
log_must test "$(get_prop 'mounted' $ds)" == "yes"
recv_cksum=$(md5digest /$ds/$TESTFILE0)
log_must test "$recv_cksum" == "$cksum"
log_must zfs destroy -r $ds
+# Test that we can override sharesmb property for encrypted raw stream.
+log_note "Must be able to override sharesmb property for encrypted raw stream"
+ds=$TESTPOOL/recv
+log_must eval "zfs send -w $esnap > $sendfile"
+log_must eval "zfs recv -o sharesmb=on $ds < $sendfile"
+log_must test "$(get_prop 'sharesmb' $ds)" == "on"
+log_must zfs destroy -r $ds
+
# Test that we can override encryption properties on a properties stream
# of an unencrypted dataset, turning it into an encryption root.
log_note "Must be able to receive stream with props as encryption root"
ds=$TESTPOOL/recv
log_must eval "zfs send -p $snap > $sendfile"
log_must eval "zfs recv -o encryption=on -o keyformat=passphrase" \
"-o keylocation=file://$keyfile $ds < $sendfile"
log_must test "$(get_prop 'encryption' $ds)" == "aes-256-gcm"
log_must test "$(get_prop 'encryptionroot' $ds)" == "$ds"
log_must test "$(get_prop 'keyformat' $ds)" == "passphrase"
log_must test "$(get_prop 'keylocation' $ds)" == "file://$keyfile"
log_must test "$(get_prop 'mounted' $ds)" == "yes"
recv_cksum=$(md5digest /$ds/$TESTFILE0)
log_must test "$recv_cksum" == "$cksum"
log_must zfs destroy -r $ds
# Test that we can override encryption properties on a recursive stream
# of an unencrypted dataset, turning it into an encryption root. The root
# dataset of the stream should become an encryption root with all children
# inheriting from it.
log_note "Must be able to receive recursive stream as encryption root"
ds=$TESTPOOL/recv
log_must eval "zfs send -R $snap > $sendfile"
log_must eval "zfs recv -o encryption=on -o keyformat=passphrase" \
"-o keylocation=file://$keyfile $ds < $sendfile"
log_must test "$(get_prop 'encryption' $ds)" == "aes-256-gcm"
log_must test "$(get_prop 'encryptionroot' $ds)" == "$ds"
log_must test "$(get_prop 'keyformat' $ds)" == "passphrase"
log_must test "$(get_prop 'keylocation' $ds)" == "file://$keyfile"
log_must test "$(get_prop 'mounted' $ds)" == "yes"
recv_cksum=$(md5digest /$ds/$TESTFILE0)
log_must test "$recv_cksum" == "$cksum"
log_must zfs destroy -r $ds
# Test that we can override an unencrypted properties stream's encryption
# settings, receiving it as an encrypted child.
log_note "Must be able to receive stream with props to encrypted child"
ds=$TESTPOOL/crypt/recv
log_must eval "zfs send -p $snap > $sendfile"
log_must eval "zfs recv -x encryption $ds < $sendfile"
log_must test "$(get_prop 'encryptionroot' $ds)" == "$TESTPOOL/crypt"
log_must test "$(get_prop 'encryption' $ds)" == "aes-256-gcm"
log_must test "$(get_prop 'keyformat' $ds)" == "passphrase"
log_must test "$(get_prop 'mounted' $ds)" == "yes"
recv_cksum=$(md5digest /$ds/$TESTFILE0)
log_must test "$recv_cksum" == "$cksum"
log_must zfs destroy -r $ds
# Test that we can override an unencrypted recursive stream's encryption
# settings, receiving all datasets as encrypted children.
log_note "Must be able to receive recursive stream to encrypted child"
ds=$TESTPOOL/crypt/recv
log_must eval "zfs send -R $snap > $sendfile"
log_must eval "zfs recv -x encryption $ds < $sendfile"
log_must test "$(get_prop 'encryptionroot' $ds)" == "$TESTPOOL/crypt"
log_must test "$(get_prop 'encryption' $ds)" == "aes-256-gcm"
log_must test "$(get_prop 'keyformat' $ds)" == "passphrase"
log_must test "$(get_prop 'mounted' $ds)" == "yes"
recv_cksum=$(md5digest /$ds/$TESTFILE0)
log_must test "$recv_cksum" == "$cksum"
log_must zfs destroy -r $ds
# Test that we can override an unencrypted, incremental, recursive stream's
# encryption settings, receiving all datasets as encrypted children.
log_note "Must be able to receive recursive stream to encrypted child"
ds=$TESTPOOL/crypt/recv
log_must eval "zfs send -R $snap2 > $sendfile"
log_must eval "zfs recv -x encryption $ds < $sendfile"
log_must test "$(get_prop 'encryptionroot' $ds)" == "$TESTPOOL/crypt"
log_must test "$(get_prop 'encryption' $ds)" == "aes-256-gcm"
log_must test "$(get_prop 'keyformat' $ds)" == "passphrase"
log_must test "$(get_prop 'mounted' $ds)" == "yes"
recv_cksum=$(md5digest /$ds/$TESTFILE0)
log_must test "$recv_cksum" == "$cksum"
log_must zfs destroy -r $ds
# Check that we haven't printed the key to the zpool history log
log_mustnot eval "zpool history -i | grep -q 'wkeydata'"
log_pass "'zfs recv' properly handles encryption properties"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_002.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_002.ksh
index b04898fa81a6..cfc478cd3592 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_002.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_002.ksh
@@ -1,115 +1,115 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
. $STF_SUITE/tests/functional/user_namespace/user_namespace_common.kshlib
#
# DESCRIPTION:
# Regression test for delegation of datasets to user namespaces.
#
# STRATEGY:
# 1. Delegate a dataset to a user namespace.
# 2. Check that 'zfs list' is only able to see inside the delegation.
# 3. Check that 'zfs create' is able to create only inside the delegation.
# 4. Check that the filesystems can be mounted inside the delegation,
# and that file permissions are appropriate.
# 5. Check that 'zfs destroy' is able to destroy only inside the delegation.
# 6. Check that 'zfs unzone' has a desirable effect.
#
verify_runnable "both"
user_ns_cleanup() {
if [ -n "$proc_ns_added" ]; then
log_must zfs unzone $proc_ns_added $TESTPOOL/userns
fi
if [ -n "$unshared_pid" ]; then
kill -9 $unshared_pid
# Give it a sec to make the global cleanup more reliable.
sleep 1
fi
log_must zfs destroy -r $TESTPOOL/userns
}
log_onexit user_ns_cleanup
log_assert "Check zfs/zpool command delegation in user namespaces"
# Create the baseline datasets.
log_must zfs create -o zoned=on $TESTPOOL/userns
log_must zfs create -o zoned=on $TESTPOOL/userns/testds
# Partial match should be denied; hence we also set this to be 'zoned'.
log_must zfs create -o zoned=on $TESTPOOL/user
# 1. Create a user namespace with a cloned mount namespace, then delegate.
unshare -Urm echo test
if [ "$?" -ne "0" ]; then
log_unsupported "Failed to create user namespace"
fi
unshare -Urm /usr/bin/sleep 1h &
unshared_pid=$!
if [ "$?" -ne "0" ]; then
log_unsupported "Failed to create user namespace"
fi
proc_ns=/proc/$unshared_pid/ns/user
sleep 2 # Wait for unshare to acquire user namespace
log_note "unshare: child=${unshared_pid} proc_ns=${proc_ns}"
NSENTER="nsenter -t $unshared_pid --all"
$NSENTER echo test
if [ "$?" -ne "0" ]; then
log_unsupported "Failed to enter user namespace"
fi
# 1b. Pre-test by checking that 'zone' does something new.
list="$($NSENTER zfs list -r -H -o name | tr '\n' ' ')"
log_must test -z "$list"
log_must zfs zone $proc_ns $TESTPOOL/userns
-proc_ns_added="$ns"
+proc_ns_added="$proc_ns"
# 2. 'zfs list'
list="$($NSENTER zfs list -r -H -o name $TESTPOOL | tr '\n' ' ')"
log_must test "$list" = "$TESTPOOL $TESTPOOL/userns $TESTPOOL/userns/testds "
# 3. 'zfs create'
log_must $NSENTER zfs create $TESTPOOL/userns/created
log_mustnot $NSENTER zfs create $TESTPOOL/user/created
# 4. Check file permissions (create mounts the filesystem). The 'permissions'
# check is simply, does it get mapped to user namespace's root/root?
log_must $NSENTER df -h /$TESTPOOL/userns/created
log_must $NSENTER mkfile 8192 /$TESTPOOL/userns/created/testfile
uidgid=$($NSENTER stat -c '%u %g' /$TESTPOOL/userns/created/testfile)
log_must test "${uidgid}" = "0 0"
# 5. 'zfs destroy'
log_must $NSENTER zfs destroy $TESTPOOL/userns/created
log_mustnot $NSENTER zfs destroy $TESTPOOL/user
# 6. 'zfs unzone' should have an effect
log_must zfs unzone $proc_ns $TESTPOOL/userns
proc_ns_added=""
list="$($NSENTER zfs list -r -H -o name | tr '\n' ' ')"
log_must test -z "$list"
log_pass "Check zfs/zpool command delegation in user namespaces"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_003.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_003.ksh
index 2a875d09b6ae..6a746c6d33f6 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_003.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/user_namespace/user_namespace_003.ksh
@@ -1,97 +1,97 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
. $STF_SUITE/tests/functional/user_namespace/user_namespace_common.kshlib
#
# DESCRIPTION:
# Regression test for delegation of datasets to user namespaces.
#
# STRATEGY:
# 1. Delegate two datasets with distinctive names to a user namespace.
# 2. Check that 'zfs list' is not able to see datasets outside of the
# delegation, which have a prefix matching one of the delegated sets.
# Also, check that all the delegated sets are visible.
#
verify_runnable "both"
user_ns_cleanup() {
if [ -n "$proc_ns_added" ]; then
log_must zfs unzone $proc_ns_added $TESTPOOL/userns
log_must zfs unzone $proc_ns_added $TESTPOOL/otheruserns
fi
if [ -n "$unshared_pid" ]; then
kill -9 $unshared_pid
# Give it a sec to make the global cleanup more reliable.
sleep 1
fi
log_must zfs destroy -r $TESTPOOL/userns
log_must zfs destroy -r $TESTPOOL/usernsisitnot
log_must zfs destroy -r $TESTPOOL/otheruserns
}
log_onexit user_ns_cleanup
log_assert "Check zfs list command handling of dataset visibility in user namespaces"
# Create the baseline dataset.
log_must zfs create -o zoned=on $TESTPOOL/userns
# Datasets with a prefix matching the delegated dataset should not be
# automatically considered visible.
log_must zfs create -o zoned=on $TESTPOOL/usernsisitnot
# All delegated datasets should be visible.
log_must zfs create -o zoned=on $TESTPOOL/otheruserns
# 1. Create a user namespace with a cloned mount namespace, then delegate.
unshare -Urm echo test
if [ "$?" -ne "0" ]; then
log_unsupported "Failed to create user namespace"
fi
unshare -Urm /usr/bin/sleep 1h &
unshared_pid=$!
if [ "$?" -ne "0" ]; then
log_unsupported "Failed to create user namespace"
fi
proc_ns=/proc/$unshared_pid/ns/user
sleep 2 # Wait for unshare to acquire user namespace
log_note "unshare: child=${unshared_pid} proc_ns=${proc_ns}"
NSENTER="nsenter -t $unshared_pid --all"
$NSENTER echo test
if [ "$?" -ne "0" ]; then
log_unsupported "Failed to enter user namespace"
fi
# 1b. Pre-test: confirm the namespace sees nothing yet, so that 'zfs zone' has an observable effect.
list="$($NSENTER zfs list -r -H -o name | tr '\n' ' ')"
log_must test -z "$list"
log_must zfs zone $proc_ns $TESTPOOL/userns
log_must zfs zone $proc_ns $TESTPOOL/otheruserns
-proc_ns_added="$ns"
+proc_ns_added="$proc_ns"
# 2. 'zfs list'
list="$($NSENTER zfs list -r -H -o name $TESTPOOL | tr '\n' ' ')"
log_must test "$list" = "$TESTPOOL $TESTPOOL/otheruserns $TESTPOOL/userns "
log_pass "Check zfs list command handling of dataset visibility in user namespaces"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress.ksh
index 883d9984be43..3431d33d97d0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress.ksh
@@ -1,169 +1,170 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2022 by Lawrence Livermore National Security, LLC.
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/reservation/reservation.shlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
#
# DESCRIPTION:
# Stress test multithreaded transfers to multiple zvols. Also verify
# zvol errors show up in zpool status.
#
# STRATEGY:
#
# For both the normal submit_bio() codepath and the blk-mq codepath, do
# the following:
#
# 1. Create one zvol per CPU
# 2. In parallel, spawn an fio "write and verify" for each zvol
# 3. Inject write errors
# 4. Write to one of the zvols with dd and verify the errors
#
verify_runnable "global"
num_zvols=$(get_num_cpus)
# If we were making one big zvol from all the pool space, it would
# be this big:
biggest_zvol_size_possible=$(largest_volsize_from_pool $TESTPOOL)
# Crude calculation: take the biggest zvol size we could possibly
# create, knock 10% off it (for overhead) and divide by the number
# of ZVOLs we want to make.
#
# Round the result down to a whole number.
typeset -f each_zvol_size=$(( floor($biggest_zvol_size_possible * 0.9 / \
$num_zvols )))
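# For example (purely hypothetical numbers, not taken from a real run): with a
# largest possible zvol of 80 GiB and 8 CPUs, each zvol comes out to
# floor(80 GiB * 0.9 / 8) = 9 GiB.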
typeset tmpdir="$(mktemp -d zvol_stress_fio_state.XXXXXX)"
function create_zvols
{
log_note "Creating $num_zvols zvols that are ${each_zvol_size}B each"
for i in $(seq $num_zvols) ; do
log_must zfs create -V $each_zvol_size $TESTPOOL/testvol$i
block_device_wait "$ZVOL_DEVDIR/$TESTPOOL/testvol$i"
done
}
function destroy_zvols
{
for i in $(seq $num_zvols) ; do
log_must_busy zfs destroy $TESTPOOL/testvol$i
done
}
function do_zvol_stress
{
# Write 10% of each zvol, or 50MB, whichever is less
zvol_write_size=$((each_zvol_size / 10))
if [ $zvol_write_size -gt $((50 * 1048576)) ] ; then
zvol_write_size=$((50 * 1048576))
fi
zvol_write_size_mb=$(($zvol_write_size / 1048576))
if is_linux ; then
engine=libaio
else
engine=psync
fi
# Spawn off one fio per zvol in parallel
pids=""
for i in $(seq $num_zvols) ; do
# Spawn one fio per zvol as its own process
fio --ioengine=$engine --name=zvol_stress$i --direct=0 \
--filename="$ZVOL_DEVDIR/$TESTPOOL/testvol$i" --bs=1048576 \
--iodepth=10 --readwrite=randwrite --size=${zvol_write_size} \
--verify_async=2 --numjobs=1 --verify=sha1 \
--verify_fatal=1 \
--continue_on_error=none \
--error_dump=1 \
--exitall_on_error \
--aux-path="$tmpdir" --do_verify=1 &
pids="$pids $!"
done
# Wait for all the spawned fios to finish and look for errors
fail=""
i=0
for pid in $pids ; do
log_note "$s waiting on $pid"
if ! wait $pid ; then
log_fail "fio error on $TESTPOOL/testvol$i"
fi
i=$(($i + 1))
done
}
function cleanup
{
log_must zinject -c all
log_must zpool clear $TESTPOOL
destroy_zvols
set_blk_mq 0
# Remove all fio's leftover state files
if [ -n "$tmpdir" ] ; then
log_must rm -fd "$tmpdir"/*.state "$tmpdir"
fi
}
log_onexit cleanup
log_assert "Stress test zvols"
set_blk_mq 0
create_zvols
# Do some fio write/verifies in parallel
do_zvol_stress
destroy_zvols
# Enable blk-mq (block multi-queue), and re-run the same test
set_blk_mq 1
create_zvols
do_zvol_stress
# Inject some errors, and verify we see some IO errors in zpool status
for DISK in $DISKS ; do
log_must zinject -d $DISK -f 10 -e io -T write $TESTPOOL
done
log_must dd if=/dev/zero of=$ZVOL_DEVDIR/$TESTPOOL/testvol1 bs=512 count=50
+sync_pool $TESTPOOL
log_must zinject -c all
# We should see write errors
typeset -i write_errors=$(zpool status -p | awk '
!NF { isvdev = 0 }
isvdev { errors += $4 }
/CKSUM$/ { isvdev = 1 }
END { print errors }
')
if [ $write_errors -eq 0 ] ; then
log_fail "Expected to see some write errors"
else
log_note "Correctly saw $write_errors write errors"
fi
log_pass "Done with zvol_stress"
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index 2c931e19a0d6..71affa119f0b 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -1,484 +1,484 @@
# $FreeBSD$
SRCDIR=${SRCTOP}/sys/contrib/openzfs/module
INCDIR=${SRCTOP}/sys/contrib/openzfs/include
KMOD= zfs
.PATH: ${SRCDIR}/avl \
${SRCDIR}/lua \
${SRCDIR}/nvpair \
${SRCDIR}/icp/algs/blake3 \
${SRCDIR}/icp/asm-aarch64/blake3 \
${SRCDIR}/icp/asm-ppc64/blake3 \
${SRCDIR}/icp/asm-x86_64/blake3 \
${SRCDIR}/icp/algs/edonr \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
${SRCDIR}/unicode \
${SRCDIR}/zcommon \
${SRCDIR}/zfs \
${SRCDIR}/zstd \
${SRCDIR}/zstd/lib/common \
${SRCDIR}/zstd/lib/compress \
${SRCDIR}/zstd/lib/decompress
CFLAGS+= -I${INCDIR}
CFLAGS+= -I${INCDIR}/os/freebsd
CFLAGS+= -I${INCDIR}/os/freebsd/spl
CFLAGS+= -I${INCDIR}/os/freebsd/zfs
CFLAGS+= -I${SRCDIR}/zstd/include
CFLAGS+= -I${SRCDIR}/icp/include
CFLAGS+= -I${.CURDIR}
CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS \
-DHAVE_UIO_ZEROCOPY -DWITHOUT_NETDUMP -D__KERNEL -D_SYS_CONDVAR_H_ \
-D_SYS_VMEM_H_ -DIN_FREEBSD_BASE
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
-DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
CFLAGS+= -DBITS_PER_LONG=32
.else
CFLAGS+= -DBITS_PER_LONG=64
.endif
SRCS= vnode_if.h device_if.h bus_if.h
# avl
SRCS+= avl.c
# icp
SRCS+= edonr.c
#icp/algs/blake3
SRCS+= blake3.c \
blake3_generic.c \
blake3_impl.c \
blake3_x86-64.c
#icp/asm-aarch64/blake3
SRCS+= b3_aarch64_sse2.S \
b3_aarch64_sse41.S
#icp/asm-ppc64/blake3
SRCS+= b3_ppc64le_sse2.S \
b3_ppc64le_sse41.S
#icp/asm-x86_64/blake3
SRCS+= blake3_avx2.S \
blake3_avx512.S \
blake3_sse2.S \
blake3_sse41.S
#lua
SRCS+= lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
#nvpair
SRCS+= nvpair.c \
fnvpair.c \
nvpair_alloc_spl.c \
nvpair_alloc_fixed.c
#os/freebsd/spl
SRCS+= acl_common.c \
btree.c \
callb.c \
list.c \
spl_acl.c \
spl_cmn_err.c \
spl_dtrace.c \
spl_kmem.c \
spl_kstat.c \
spl_misc.c \
spl_policy.c \
spl_string.c \
spl_sunddi.c \
spl_sysevent.c \
spl_taskq.c \
spl_uio.c \
spl_vfs.c \
spl_vm.c \
spl_zone.c \
sha256c.c \
sha512c.c \
spl_procfs_list.c \
spl_zlib.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
SRCS+= spl_atomic.c
.endif
#os/freebsd/zfs
SRCS+= abd_os.c \
crypto_os.c \
dmu_os.c \
+ event_os.c \
hkdf.c \
kmod_core.c \
spa_os.c \
sysctl_os.c \
vdev_file.c \
vdev_label_os.c \
vdev_geom.c \
zfs_acl.c \
zfs_ctldir.c \
zfs_dir.c \
zfs_ioctl_compat.c \
zfs_ioctl_os.c \
zfs_log.c \
zfs_racct.c \
zfs_replay.c \
zfs_vfsops.c \
zfs_vnops_os.c \
zfs_znode.c \
zio_crypt.c \
zvol_os.c
#unicode
SRCS+= uconv.c \
u8_textprep.c
#zcommon
SRCS+= zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
#zfs
SRCS+= abd.c \
aggsum.c \
arc.c \
arc_os.c \
blake3_zfs.c \
blkptr.c \
bplist.c \
bpobj.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
bptree.c \
bqueue.c \
dataset_kstats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_bookmark.c \
dsl_dir.c \
dsl_crypt.c \
dsl_destroy.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_userhold.c \
edonr_zfs.c \
fm.c \
gzip.c \
lzjb.c \
lz4.c \
lz4_zfs.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha256.c \
skein_zfs.c \
spa.c \
- spa_boot.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
uberblock.c \
unique.c \
vdev.c \
vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_indirect.c \
vdev_indirect_births.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_chksum.c \
zfs_debug.c \
zfs_file_os.c \
zfs_fm.c \
zfs_fuid.c \
zfs_ioctl.c \
zfs_onexit.c \
zfs_quota.c \
zfs_ratelimit.c \
zfs_rlock.c \
zfs_sa.c \
zfs_vnops.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_inject.c \
zle.c \
zrlock.c \
zthr.c \
zvol.c
SRCS+= zfs_zstd.c \
entropy_common.c \
error_private.c \
fse_compress.c \
fse_decompress.c \
hist.c \
huf_compress.c \
huf_decompress.c \
pool.c \
xxhash.c \
zstd_common.c \
zstd_compress.c \
zstd_compress_literals.c \
zstd_compress_sequences.c \
zstd_compress_superblock.c \
zstd_ddict.c \
zstd_decompress.c \
zstd_decompress_block.c \
zstd_double_fast.c \
zstd_fast.c \
zstd_lazy.c \
zstd_ldm.c \
zstd_opt.c
.include <bsd.kmod.mk>
CFLAGS+= -include ${SRCTOP}/sys/cddl/compat/opensolaris/sys/debug_compat.h
CFLAGS+= -include ${INCDIR}/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -include ${SRCTOP}/sys/modules/zfs/static_ccompile.h
CWARNFLAGS+= ${OPENZFS_CWARNFLAGS}
CFLAGS.gcc+= -Wno-pointer-to-int-cast
CFLAGS.lapi.c= -Wno-cast-qual
CFLAGS.lcompat.c= -Wno-cast-qual
CFLAGS.lobject.c= -Wno-cast-qual
CFLAGS.ltable.c= -Wno-cast-qual
CFLAGS.lvm.c= -Wno-cast-qual
CFLAGS.nvpair.c= -Wno-cast-qual -DHAVE_RPC_TYPES
CFLAGS.spl_string.c= -Wno-cast-qual
CFLAGS.spl_vm.c= -Wno-cast-qual
CFLAGS.spl_zlib.c= -Wno-cast-qual
CFLAGS.abd.c= -Wno-cast-qual
CFLAGS.zfs_log.c= -Wno-cast-qual
CFLAGS.zfs_vnops_os.c= -Wno-pointer-arith
CFLAGS.u8_textprep.c= -Wno-cast-qual
CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_sse.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_avx512.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zprop_common.c= -Wno-cast-qual
CFLAGS.ddt.c= -Wno-cast-qual
CFLAGS.dmu.c= -Wno-cast-qual
CFLAGS.dmu_traverse.c= -Wno-cast-qual
CFLAGS.dnode.c+= ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.dsl_dir.c= -Wno-cast-qual
CFLAGS.dsl_deadlist.c= -Wno-cast-qual
CFLAGS.dsl_prop.c= -Wno-cast-qual
CFLAGS.edonr.c= -Wno-cast-qual
CFLAGS.fm.c= -Wno-cast-qual
CFLAGS.lz4.c= -Wno-cast-qual
CFLAGS.lz4_zfs.c= -Wno-cast-qual
CFLAGS.spa.c= -Wno-cast-qual
CFLAGS.spa_misc.c= -Wno-cast-qual
CFLAGS.sysctl_os.c= -include ${SRCTOP}/sys/modules/zfs/zfs_config.h
CFLAGS.vdev_draid.c= -Wno-cast-qual
CFLAGS.vdev_raidz.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_scalar.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_avx2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_avx512f.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_sse2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.zap_leaf.c= -Wno-cast-qual
CFLAGS.zap_micro.c= -Wno-cast-qual
CFLAGS.zcp.c= -Wno-cast-qual
CFLAGS.zfs_fm.c= -Wno-cast-qual ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.zfs_ioctl.c= -Wno-cast-qual
CFLAGS.zil.c= -Wno-cast-qual
CFLAGS.zio.c= -Wno-cast-qual
CFLAGS.zrlock.c= -Wno-cast-qual
CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zstd.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.entropy_common.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.error_private.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.fse_compress.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.fse_decompress.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.hist.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.huf_compress.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.huf_decompress.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.pool.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.xxhash.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_common.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_compress.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_compress_literals.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_compress_sequences.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_compress_superblock.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_ddict.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_decompress.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_decompress_block.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_double_fast.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_fast.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_lazy.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_ldm.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.zstd_opt.c= -U__BMI__ -fno-tree-vectorize
.if ${MACHINE_CPUARCH} == "aarch64"
__ZFS_ZSTD_AARCH64_FLAGS= -include ${SRCDIR}/zstd/include/aarch64_compat.h
CFLAGS.zstd.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.entropy_common.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.error_private.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.fse_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.fse_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.hist.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.huf_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.huf_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.pool.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.xxhash.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_common.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_literals.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_sequences.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_superblock.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ddict.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_decompress_block.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_double_fast.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_fast.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_lazy.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ldm.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_opt.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
b3_aarch64_sse2.o: b3_aarch64_sse2.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
b3_aarch64_sse41.o: b3_aarch64_sse41.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
.endif
CFLAGS.zstd.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.entropy_common.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.error_private.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_compress.c+= ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.fse_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.hist.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.pool.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.xxhash.c+= -include ${SRCTOP}/sys/sys/_null.h
CFLAGS.xxhash.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_common.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_literals.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_sequences.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_superblock.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_superblock.c+= ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.zstd_ddict.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress_block.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_double_fast.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_fast.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_lazy.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ldm.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_opt.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
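A note on the long block of CFLAGS.<file> assignments above: FreeBSD's make infrastructure applies flags stored in a variable named CFLAGS.<source basename> only when compiling that one file, which is how the zstd sources get -U__BMI__, -fno-tree-vectorize, and the extra warning suppressions without affecting the rest of the module. A quick way to inspect what a single file will receive (the source path below is only an example of the usual in-tree location):
# bmake's -V prints a variable's value without building anything.
make -C /usr/src/sys/modules/zfs -V CFLAGS.zstd_opt.c
# expected output along the lines of:
#   -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}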
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index 88cb658c50eb..9ef55380e57d 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,979 +1,988 @@
/*
* $FreeBSD$
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
/* add_disk() returns int */
/* #undef HAVE_ADD_DISK_RET */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
/* Define if you have [rt] */
#define HAVE_AIO_H 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdevname() is available */
/* #undef HAVE_BDEVNAME */
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT */
/* bdev_max_discard_sectors() is available */
/* #undef HAVE_BDEV_MAX_DISCARD_SECTORS */
/* bdev_max_secure_erase_sectors() is available */
/* #undef HAVE_BDEV_MAX_SECURE_ERASE_SECTORS */
/* block_device_operations->submit_bio() returns void */
/* #undef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio_alloc() takes 4 arguments */
/* #undef HAVE_BIO_ALLOC_4ARG */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_dev() is a macro */
/* #undef HAVE_BIO_SET_DEV_MACRO */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_get_by_path() handles ERESTARTSYS */
/* #undef HAVE_BLKDEV_GET_ERESTARTSYS */
/* blkdev_issue_discard() is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD */
/* blkdev_issue_secure_erase() is available */
/* #undef HAVE_BLKDEV_ISSUE_SECURE_ERASE */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk_cleanup_disk() exists */
/* #undef HAVE_BLK_CLEANUP_DISK */
/* block multiqueue is available */
/* #undef HAVE_BLK_MQ */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_discard() is available */
/* #undef HAVE_BLK_QUEUE_DISCARD */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_update_readahead() exists */
/* #undef HAVE_BLK_QUEUE_UPDATE_READAHEAD */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* dequeue_signal() takes 4 arguments */
/* #undef HAVE_DEQUEUE_SIGNAL_4ARG */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* disk_update_readahead() exists */
/* #undef HAVE_DISK_UPDATE_READAHEAD */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
/* FALLOC_FL_ZERO_RANGE is defined */
/* #undef HAVE_FALLOC_FL_ZERO_RANGE */
/* fault_in_iov_iter_readable() is available */
/* #undef HAVE_FAULT_IN_IOV_ITER_READABLE */
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
+/* fops->fadvise() exists */
+/* #undef HAVE_FILE_FADVISE */
+
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
+/* yes */
+/* #undef HAVE_GENERIC_FADVISE */
+
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_acl() exists */
/* #undef HAVE_GET_ACL */
/* iops->get_acl() takes rcu */
/* #undef HAVE_GET_ACL_RCU */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_INFINITE_RECURSION */
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->permission() takes struct user_namespace* */
/* #undef HAVE_IOPS_PERMISSION_USERNS */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter_type() is available */
/* #undef HAVE_IOV_ITER_TYPE */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* kernel has asm/fpu/internal.h */
/* #undef HAVE_KERNEL_FPU_INTERNAL_HEADER */
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() take loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* linux/blk-cgroup.h exists */
/* #undef HAVE_LINUX_BLK_CGROUP_HEADER */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Noting that make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* folio_wait_bit() exists */
/* #undef HAVE_PAGEMAP_FOLIO_WAIT_BIT */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and existed */
/* #undef HAVE_QAT */
/* register_shrinker is vararg */
/* #undef HAVE_REGISTER_SHRINKER_VARARG */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* cs->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* standalone <linux/stdarg.h> exists */
/* #undef HAVE_STANDALONE_LINUX_STDARG */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* struct kobj_type has default_groups */
/* #undef HAVE_SYSFS_DEFAULT_GROUPS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* user_namespace->ns.inum exists */
/* #undef HAVE_USER_NS_COMMON_INUM */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* filemap_dirty_folio exists */
/* #undef HAVE_VFS_FILEMAP_DIRTY_FOLIO */
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* address_space_operations->readpages exists */
/* #undef HAVE_VFS_READPAGES */
/* read_folio exists */
/* #undef HAVE_VFS_READ_FOLIO */
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
+/* xattr_handler->get() wants dentry and inode and flags */
+/* #undef HAVE_XATTR_GET_DENTRY_INODE_FLAGS */
+
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define if host toolchain supports XSAVE */
#define HAVE_XSAVE 1
/* Define if host toolchain supports XSAVEOPT */
#define HAVE_XSAVEOPT 1
/* Define if host toolchain supports XSAVES */
#define HAVE_XSAVES 1
/* ZERO_PAGE() is GPL-only */
/* #undef HAVE_ZERO_PAGE_GPL_ONLY */
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* hardened module_param_call */
/* #undef MODULE_PARAM_CALL_CONST */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
/* using complete_and_exit() instead */
/* #undef SPL_KTHREAD_COMPLETE_AND_EXIT */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* pde_data() is PDE_DATA() */
/* #undef SPL_PDE_DATA */
/* Define to 1 if all of the C90 standard headers exist (not just the ones
required in a freestanding environment). This macro is provided for
backward compatibility; new code need not use it. */
#define STDC_HEADERS 1
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* Version number of package */
#define VERSION "2.1.99"
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* GENHD_FL_EXT_DEVT flag is not available */
/* #undef ZFS_GENHD_FL_EXT_DEVT */
/* GENHD_FL_NO_PART_SCAN flag is available */
/* #undef ZFS_GENHD_FL_NO_PART */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_ga582d5299"
+#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_gc629f0bf6"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
#define ZFS_META_KVER_MAX "5.19"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_ga582d5299"
+#define ZFS_META_RELEASE "FreeBSD_gc629f0bf6"
/* Define the project version. */
#define ZFS_META_VERSION "2.1.99"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index fcf064562d07..e933621159d5 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.1.99-1335-ga582d5299"
+#define ZFS_META_GITREV "zfs-2.1.99-1404-gc629f0bf6"
